//
// Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
// Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// RISCV Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.

register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.
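//
// As a purely illustrative example (not one of the actual definitions used
// below), a save-on-call 32-bit integer register with encoding 5 would be
// declared roughly as:
//
//   reg_def R5 ( SOC, SOC, Op_RegI, 5, x5->as_VMReg() );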

// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
//
// We follow the C1 compiler in making registers
//
//   x7, x9-x17, x27-x31 volatile (caller save)
//   x0-x4, x8, x23 system (no save, no allocate)
//   x5-x6 non-allocatable (so we can use them as temporary regs)

//
// As regards Java usage, we don't use any callee-save registers
// because this makes it difficult to de-optimise a frame (see comment
// in the x86 implementation of Deoptimization::unwind_callee_save_values).
//

// General Registers

reg_def R0      ( NS,  NS,  Op_RegI, 0,  x0->as_VMReg()         ); // zr
reg_def R0_H    ( NS,  NS,  Op_RegI, 0,  x0->as_VMReg()->next() );
reg_def R1      ( NS,  SOC, Op_RegI, 1,  x1->as_VMReg()         ); // ra
reg_def R1_H    ( NS,  SOC, Op_RegI, 1,  x1->as_VMReg()->next() );
reg_def R2      ( NS,  NS,  Op_RegI, 2,  x2->as_VMReg()         ); // sp
reg_def R2_H    ( NS,  NS,  Op_RegI, 2,  x2->as_VMReg()->next() );
reg_def R3      ( NS,  NS,  Op_RegI, 3,  x3->as_VMReg()         ); // gp
reg_def R3_H    ( NS,  NS,  Op_RegI, 3,  x3->as_VMReg()->next() );
reg_def R4      ( NS,  NS,  Op_RegI, 4,  x4->as_VMReg()         ); // tp
reg_def R4_H    ( NS,  NS,  Op_RegI, 4,  x4->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI, 7,  x7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI, 7,  x7->as_VMReg()->next() );
reg_def R8      ( NS,  SOE, Op_RegI, 8,  x8->as_VMReg()         ); // fp
reg_def R8_H    ( NS,  SOE, Op_RegI, 8,  x8->as_VMReg()->next() );
reg_def R9      ( SOC, SOE, Op_RegI, 9,  x9->as_VMReg()         );
reg_def R9_H    ( SOC, SOE, Op_RegI, 9,  x9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()->next());
reg_def R18     ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()        );
reg_def R18_H   ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()->next());
reg_def R19     ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()->next());
reg_def R23     ( NS,  SOE, Op_RegI, 23, x23->as_VMReg()        ); // java thread
reg_def R23_H   ( NS,  SOE, Op_RegI, 23, x23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()->next());
reg_def R27     ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()->next());
reg_def R28     ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()        );
reg_def R28_H   ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()->next());
reg_def R29     ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()        );
reg_def R29_H   ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()->next());
reg_def R30     ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()        );
reg_def R30_H   ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()->next());
reg_def R31     ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()        );
reg_def R31_H   ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()->next());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Double Registers

// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// RISCV has 32 floating-point registers. Each can store a single
// or double precision floating-point value.

// For Java use, float registers f0-f31 are always save-on-call, whereas
// the platform ABI treats f8-f9 and f18-f27 as callee-save. Other
// float registers are SOC as per the platform spec.

reg_def F0    ( SOC, SOC, Op_RegF,  0,  f0->as_VMReg()          );
reg_def F0_H  ( SOC, SOC, Op_RegF,  0,  f0->as_VMReg()->next()  );
reg_def F1    ( SOC, SOC, Op_RegF,  1,  f1->as_VMReg()          );
reg_def F1_H  ( SOC, SOC, Op_RegF,  1,  f1->as_VMReg()->next()  );
reg_def F2    ( SOC, SOC, Op_RegF,  2,  f2->as_VMReg()          );
reg_def F2_H  ( SOC, SOC, Op_RegF,  2,  f2->as_VMReg()->next()  );
reg_def F3    ( SOC, SOC, Op_RegF,  3,  f3->as_VMReg()          );
reg_def F3_H  ( SOC, SOC, Op_RegF,  3,  f3->as_VMReg()->next()  );
reg_def F4    ( SOC, SOC, Op_RegF,  4,  f4->as_VMReg()          );
reg_def F4_H  ( SOC, SOC, Op_RegF,  4,  f4->as_VMReg()->next()  );
reg_def F5    ( SOC, SOC, Op_RegF,  5,  f5->as_VMReg()          );
reg_def F5_H  ( SOC, SOC, Op_RegF,  5,  f5->as_VMReg()->next()  );
reg_def F6    ( SOC, SOC, Op_RegF,  6,  f6->as_VMReg()          );
reg_def F6_H  ( SOC, SOC, Op_RegF,  6,  f6->as_VMReg()->next()  );
reg_def F7    ( SOC, SOC, Op_RegF,  7,  f7->as_VMReg()          );
reg_def F7_H  ( SOC, SOC, Op_RegF,  7,  f7->as_VMReg()->next()  );
reg_def F8    ( SOC, SOE, Op_RegF,  8,  f8->as_VMReg()          );
reg_def F8_H  ( SOC, SOE, Op_RegF,  8,  f8->as_VMReg()->next()  );
reg_def F9    ( SOC, SOE, Op_RegF,  9,  f9->as_VMReg()          );
reg_def F9_H  ( SOC, SOE, Op_RegF,  9,  f9->as_VMReg()->next()  );
reg_def F10   ( SOC, SOC, Op_RegF,  10, f10->as_VMReg()         );
reg_def F10_H ( SOC, SOC, Op_RegF,  10, f10->as_VMReg()->next() );
reg_def F11   ( SOC, SOC, Op_RegF,  11, f11->as_VMReg()         );
reg_def F11_H ( SOC, SOC, Op_RegF,  11, f11->as_VMReg()->next() );
reg_def F12   ( SOC, SOC, Op_RegF,  12, f12->as_VMReg()         );
reg_def F12_H ( SOC, SOC, Op_RegF,  12, f12->as_VMReg()->next() );
reg_def F13   ( SOC, SOC, Op_RegF,  13, f13->as_VMReg()         );
reg_def F13_H ( SOC, SOC, Op_RegF,  13, f13->as_VMReg()->next() );
reg_def F14   ( SOC, SOC, Op_RegF,  14, f14->as_VMReg()         );
reg_def F14_H ( SOC, SOC, Op_RegF,  14, f14->as_VMReg()->next() );
reg_def F15   ( SOC, SOC, Op_RegF,  15, f15->as_VMReg()         );
reg_def F15_H ( SOC, SOC, Op_RegF,  15, f15->as_VMReg()->next() );
reg_def F16   ( SOC, SOC, Op_RegF,  16, f16->as_VMReg()         );
reg_def F16_H ( SOC, SOC, Op_RegF,  16, f16->as_VMReg()->next() );
reg_def F17   ( SOC, SOC, Op_RegF,  17, f17->as_VMReg()         );
reg_def F17_H ( SOC, SOC, Op_RegF,  17, f17->as_VMReg()->next() );
reg_def F18   ( SOC, SOE, Op_RegF,  18, f18->as_VMReg()         );
reg_def F18_H ( SOC, SOE, Op_RegF,  18, f18->as_VMReg()->next() );
reg_def F19   ( SOC, SOE, Op_RegF,  19, f19->as_VMReg()         );
reg_def F19_H ( SOC, SOE, Op_RegF,  19, f19->as_VMReg()->next() );
reg_def F20   ( SOC, SOE, Op_RegF,  20, f20->as_VMReg()         );
reg_def F20_H ( SOC, SOE, Op_RegF,  20, f20->as_VMReg()->next() );
reg_def F21   ( SOC, SOE, Op_RegF,  21, f21->as_VMReg()         );
reg_def F21_H ( SOC, SOE, Op_RegF,  21, f21->as_VMReg()->next() );
reg_def F22   ( SOC, SOE, Op_RegF,  22, f22->as_VMReg()         );
reg_def F22_H ( SOC, SOE, Op_RegF,  22, f22->as_VMReg()->next() );
reg_def F23   ( SOC, SOE, Op_RegF,  23, f23->as_VMReg()         );
reg_def F23_H ( SOC, SOE, Op_RegF,  23, f23->as_VMReg()->next() );
reg_def F24   ( SOC, SOE, Op_RegF,  24, f24->as_VMReg()         );
reg_def F24_H ( SOC, SOE, Op_RegF,  24, f24->as_VMReg()->next() );
reg_def F25   ( SOC, SOE, Op_RegF,  25, f25->as_VMReg()         );
reg_def F25_H ( SOC, SOE, Op_RegF,  25, f25->as_VMReg()->next() );
reg_def F26   ( SOC, SOE, Op_RegF,  26, f26->as_VMReg()         );
reg_def F26_H ( SOC, SOE, Op_RegF,  26, f26->as_VMReg()->next() );
reg_def F27   ( SOC, SOE, Op_RegF,  27, f27->as_VMReg()         );
reg_def F27_H ( SOC, SOE, Op_RegF,  27, f27->as_VMReg()->next() );
reg_def F28   ( SOC, SOC, Op_RegF,  28, f28->as_VMReg()         );
reg_def F28_H ( SOC, SOC, Op_RegF,  28, f28->as_VMReg()->next() );
reg_def F29   ( SOC, SOC, Op_RegF,  29, f29->as_VMReg()         );
reg_def F29_H ( SOC, SOC, Op_RegF,  29, f29->as_VMReg()->next() );
reg_def F30   ( SOC, SOC, Op_RegF,  30, f30->as_VMReg()         );
reg_def F30_H ( SOC, SOC, Op_RegF,  30, f30->as_VMReg()->next() );
reg_def F31   ( SOC, SOC, Op_RegF,  31, f31->as_VMReg()         );
reg_def F31_H ( SOC, SOC, Op_RegF,  31, f31->as_VMReg()->next() );

// ----------------------------
// Vector Registers
// ----------------------------

// For RVV vector registers, we simply extend the vector register size to 4
// 'logical' slots. This is nominally 128 bits but it actually covers
// all possible 'physical' RVV vector register lengths from 128 to 1024
// bits. The 'physical' RVV vector register length is detected during
// startup, so the register allocator is able to identify the correct
// number of bytes needed for an RVV spill/unspill.
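//
// For example, on a machine whose 'physical' vector registers are 256 bits
// wide, the allocator still sees the same 4 logical slots per vector
// register, but an RVV spill/unspill of one register moves 32 bytes.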

reg_def V0    ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()           );
reg_def V0_H  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next()   );
reg_def V0_J  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next(2)  );
reg_def V0_K  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next(3)  );

reg_def V1    ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()           );
reg_def V1_H  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next()   );
reg_def V1_J  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next(2)  );
reg_def V1_K  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next(3)  );

reg_def V2    ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()           );
reg_def V2_H  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next()   );
reg_def V2_J  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next(2)  );
reg_def V2_K  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next(3)  );

reg_def V3    ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()           );
reg_def V3_H  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next()   );
reg_def V3_J  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next(2)  );
reg_def V3_K  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next(3)  );

reg_def V4    ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()           );
reg_def V4_H  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next()   );
reg_def V4_J  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next(2)  );
reg_def V4_K  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next(3)  );

reg_def V5    ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()           );
reg_def V5_H  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next()   );
reg_def V5_J  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next(2)  );
reg_def V5_K  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next(3)  );

reg_def V6    ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()           );
reg_def V6_H  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next()   );
reg_def V6_J  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next(2)  );
reg_def V6_K  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next(3)  );

reg_def V7    ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()           );
reg_def V7_H  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next()   );
reg_def V7_J  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next(2)  );
reg_def V7_K  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next(3)  );

reg_def V8    ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()           );
reg_def V8_H  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next()   );
reg_def V8_J  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next(2)  );
reg_def V8_K  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next(3)  );

reg_def V9    ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()           );
reg_def V9_H  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next()   );
reg_def V9_J  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next(2)  );
reg_def V9_K  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next(3)  );

reg_def V10   ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()          );
reg_def V10_H ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next()  );
reg_def V10_J ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(2) );
reg_def V10_K ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(3) );

reg_def V11   ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()          );
reg_def V11_H ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next()  );
reg_def V11_J ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(2) );
reg_def V11_K ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(3) );

reg_def V12   ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()          );
reg_def V12_H ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next()  );
reg_def V12_J ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(2) );
reg_def V12_K ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(3) );

reg_def V13   ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()          );
reg_def V13_H ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next()  );
reg_def V13_J ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(2) );
reg_def V13_K ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(3) );

reg_def V14   ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()          );
reg_def V14_H ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next()  );
reg_def V14_J ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(2) );
reg_def V14_K ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(3) );

reg_def V15   ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()          );
reg_def V15_H ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next()  );
reg_def V15_J ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(2) );
reg_def V15_K ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(3) );

reg_def V16   ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()          );
reg_def V16_H ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next()  );
reg_def V16_J ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(2) );
reg_def V16_K ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(3) );

reg_def V17   ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()          );
reg_def V17_H ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next()  );
reg_def V17_J ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(2) );
reg_def V17_K ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(3) );

reg_def V18   ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()          );
reg_def V18_H ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next()  );
reg_def V18_J ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(2) );
reg_def V18_K ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(3) );

reg_def V19   ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()          );
reg_def V19_H ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next()  );
reg_def V19_J ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(2) );
reg_def V19_K ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(3) );

reg_def V20   ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()          );
reg_def V20_H ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next()  );
reg_def V20_J ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(2) );
reg_def V20_K ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(3) );

reg_def V21   ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()          );
reg_def V21_H ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next()  );
reg_def V21_J ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(2) );
reg_def V21_K ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(3) );

reg_def V22   ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()          );
reg_def V22_H ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next()  );
reg_def V22_J ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(2) );
reg_def V22_K ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(3) );

reg_def V23   ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()          );
reg_def V23_H ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next()  );
reg_def V23_J ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(2) );
reg_def V23_K ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(3) );

reg_def V24   ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()          );
reg_def V24_H ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next()  );
reg_def V24_J ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(2) );
reg_def V24_K ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(3) );

reg_def V25   ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()          );
reg_def V25_H ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next()  );
reg_def V25_J ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(2) );
reg_def V25_K ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(3) );

reg_def V26   ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()          );
reg_def V26_H ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next()  );
reg_def V26_J ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(2) );
reg_def V26_K ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(3) );

reg_def V27   ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()          );
reg_def V27_H ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next()  );
reg_def V27_J ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(2) );
reg_def V27_K ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(3) );

reg_def V28   ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()          );
reg_def V28_H ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next()  );
reg_def V28_J ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(2) );
reg_def V28_K ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(3) );

reg_def V29   ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()          );
reg_def V29_H ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next()  );
reg_def V29_J ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(2) );
reg_def V29_K ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(3) );

reg_def V30   ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()          );
reg_def V30_H ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next()  );
reg_def V30_J ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(2) );
reg_def V30_K ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(3) );

reg_def V31   ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()          );
reg_def V31_H ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next()  );
reg_def V31_J ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(2) );
reg_def V31_K ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(3) );

// ----------------------------
// Special Registers
// ----------------------------

// On riscv, the physical flag register is missing, so we use t1 instead
// to bridge the RegFlag semantics in share/opto.

reg_def RFLAGS   (SOC, SOC, Op_RegFlags, 6, x6->as_VMReg()        );

// Specify priority of register selection within phases of register
// allocation.  Highest priority is first.  A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry.  Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.

alloc_class chunk0(
    // volatiles
    R7,  R7_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H,

    // arg registers
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,

    // non-volatiles
    R9,  R9_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers
    R23, R23_H, // java thread
    R27, R27_H, // heapbase
    R4,  R4_H,  // thread
    R8,  R8_H,  // fp
    R0,  R0_H,  // zero
    R1,  R1_H,  // ra
    R2,  R2_H,  // sp
    R3,  R3_H,  // gp
);

alloc_class chunk1(

    // no save
    F0,  F0_H,
    F1,  F1_H,
    F2,  F2_H,
    F3,  F3_H,
    F4,  F4_H,
    F5,  F5_H,
    F6,  F6_H,
    F7,  F7_H,
    F28, F28_H,
    F29, F29_H,
    F30, F30_H,
    F31, F31_H,

    // arg registers
    F10, F10_H,
    F11, F11_H,
    F12, F12_H,
    F13, F13_H,
    F14, F14_H,
    F15, F15_H,
    F16, F16_H,
    F17, F17_H,

    // non-volatiles
    F8,  F8_H,
    F9,  F9_H,
    F18, F18_H,
    F19, F19_H,
    F20, F20_H,
    F21, F21_H,
    F22, F22_H,
    F23, F23_H,
    F24, F24_H,
    F25, F25_H,
    F26, F26_H,
    F27, F27_H,
);

alloc_class chunk2(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);

alloc_class chunk3(RFLAGS);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Class for all 32 bit general purpose registers
reg_class all_reg32(
    R0,
    R1,
    R2,
    R3,
    R4,
    R7,
    R8,
    R9,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);

// Class for any 32 bit integer registers (excluding zr)
reg_class any_reg32 %{
  return _ANY_REG32_mask;
%}

// Singleton class for R10 int register
reg_class int_r10_reg(R10);

// Singleton class for R12 int register
reg_class int_r12_reg(R12);

// Singleton class for R13 int register
reg_class int_r13_reg(R13);

// Singleton class for R14 int register
reg_class int_r14_reg(R14);

// Class for all long integer registers
reg_class all_reg(
    R0,  R0_H,
    R1,  R1_H,
    R2,  R2_H,
    R3,  R3_H,
    R4,  R4_H,
    R7,  R7_H,
    R8,  R8_H,
    R9,  R9_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);

// Class for all long integer registers (excluding zr)
reg_class any_reg %{
  return _ANY_REG_mask;
%}

// Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
    R0,                       // zr
    R1,                       // ra
    R2,                       // sp
    R3,                       // gp
    R4,                       // tp
    R23                       // java thread
);

// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    R0,  R0_H,                // zr
    R1,  R1_H,                // ra
    R2,  R2_H,                // sp
    R3,  R3_H,                // gp
    R4,  R4_H,                // tp
    R23, R23_H                // java thread
);

// Class for all non-special integer registers
reg_class no_special_reg32 %{
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  return _NO_SPECIAL_REG_mask;
%}

reg_class ptr_reg %{
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding fp)
reg_class no_special_no_fp_ptr_reg %{
  return _NO_SPECIAL_NO_FP_PTR_REG_mask;
%}

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for 64 bit register r12
reg_class r12_reg(
    R12, R12_H
);

// Class for 64 bit register r13
reg_class r13_reg(
    R13, R13_H
);

// Class for 64 bit register r14
reg_class r14_reg(
    R14, R14_H
);

// Class for 64 bit register r15
reg_class r15_reg(
    R15, R15_H
);

// Class for 64 bit register r16
reg_class r16_reg(
    R16, R16_H
);

// Class for method register
reg_class method_reg(
    R31, R31_H
);

// Class for java thread register
reg_class java_thread_reg(
    R23, R23_H
);

reg_class r28_reg(
    R28, R28_H
);

reg_class r29_reg(
    R29, R29_H
);

reg_class r30_reg(
    R30, R30_H
);

reg_class r31_reg(
    R31, R31_H
);

// Class for zero register
reg_class zr_reg(
    R0, R0_H
);

// Class for thread register
reg_class thread_reg(
    R4, R4_H
);

// Class for frame pointer register
reg_class fp_reg(
    R8, R8_H
);

// Class for link register
reg_class ra_reg(
    R1, R1_H
);

// Class for long sp register
reg_class sp_reg(
    R2, R2_H
);

// Class for all float registers
reg_class float_reg(
    F0,
    F1,
    F2,
    F3,
    F4,
    F5,
    F6,
    F7,
    F8,
    F9,
    F10,
    F11,
    F12,
    F13,
    F14,
    F15,
    F16,
    F17,
    F18,
    F19,
    F20,
    F21,
    F22,
    F23,
    F24,
    F25,
    F26,
    F27,
    F28,
    F29,
    F30,
    F31
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    F0,  F0_H,
    F1,  F1_H,
    F2,  F2_H,
    F3,  F3_H,
    F4,  F4_H,
    F5,  F5_H,
    F6,  F6_H,
    F7,  F7_H,
    F8,  F8_H,
    F9,  F9_H,
    F10, F10_H,
    F11, F11_H,
    F12, F12_H,
    F13, F13_H,
    F14, F14_H,
    F15, F15_H,
    F16, F16_H,
    F17, F17_H,
    F18, F18_H,
    F19, F19_H,
    F20, F20_H,
    F21, F21_H,
    F22, F22_H,
    F23, F23_H,
    F24, F24_H,
    F25, F25_H,
    F26, F26_H,
    F27, F27_H,
    F28, F28_H,
    F29, F29_H,
    F30, F30_H,
    F31, F31_H
);

// Class for RVV vector registers
// Note: v0, v30 and v31 are used as mask registers.
reg_class vectora_reg(
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K
);

// Class for 64 bit register f0
reg_class f0_reg(
    F0, F0_H
);

// Class for 64 bit register f1
reg_class f1_reg(
    F1, F1_H
);

// Class for 64 bit register f2
reg_class f2_reg(
    F2, F2_H
);

// Class for 64 bit register f3
reg_class f3_reg(
    F3, F3_H
);

// class for vector register v1
reg_class v1_reg(
    V1, V1_H, V1_J, V1_K
);

// class for vector register v2
reg_class v2_reg(
    V2, V2_H, V2_J, V2_K
);

// class for vector register v3
reg_class v3_reg(
    V3, V3_H, V3_J, V3_K
);

// class for vector register v4
reg_class v4_reg(
    V4, V4_H, V4_J, V4_K
);

// class for vector register v5
reg_class v5_reg(
    V5, V5_H, V5_J, V5_K
);

// class for vector register v6
reg_class v6_reg(
    V6, V6_H, V6_J, V6_K
);

// class for vector register v7
reg_class v7_reg(
    V7, V7_H, V7_J, V7_K
);

// class for vector register v8
reg_class v8_reg(
    V8, V8_H, V8_J, V8_K
);

// class for vector register v9
reg_class v9_reg(
    V9, V9_H, V9_J, V9_K
);

// class for vector register v10
reg_class v10_reg(
    V10, V10_H, V10_J, V10_K
);

// class for vector register v11
reg_class v11_reg(
    V11, V11_H, V11_J, V11_K
);

// class for vector register v12
reg_class v12_reg(
    V12, V12_H, V12_J, V12_K
);

// class for vector register v13
reg_class v13_reg(
    V13, V13_H, V13_J, V13_K
);

// class for vector register v14
reg_class v14_reg(
    V14, V14_H, V14_J, V14_K
);

// class for vector register v15
reg_class v15_reg(
    V15, V15_H, V15_J, V15_K
);

// class for condition codes
reg_class reg_flags(RFLAGS);

// Class for RVV v0 mask register
// https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#53-vector-masking
// The mask value used to control execution of a masked vector
// instruction is always supplied by vector register v0.
reg_class vmask_reg_v0 (
    V0
);

// Class for RVV mask registers
// We need two more vmask registers to do the vector mask logical ops,
// so define v30, v31 as mask registers too.
reg_class vmask_reg (
    V0,
    V30,
    V31
);
%}

//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name.
// Current support includes integer values in the range [0, 0x7FFFFFFF].
// Format:
//        int_def  <name>         ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
//        #define  <name>   (<expression>)
//        // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
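// As a worked example of the pattern above, the DEFAULT_COST entry defined
// below should expand in the generated ad_<arch>.hpp to something like:
//
//        #define  DEFAULT_COST   (100)
//        // value == 100
//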

// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. A huge cost appears to be a way of saying don't do
// something.

definitions %{
  // The default cost (of a register move instruction).
  int_def DEFAULT_COST         (  100,               100);
  int_def ALU_COST             (  100,  1 * DEFAULT_COST);          // unknown, const, arith, shift, slt,
                                                                    // multi, auipc, nop, logical, move
  int_def LOAD_COST            (  300,  3 * DEFAULT_COST);          // load, fpload
  int_def STORE_COST           (  100,  1 * DEFAULT_COST);          // store, fpstore
  int_def XFER_COST            (  300,  3 * DEFAULT_COST);          // mfc, mtc, fcvt, fmove, fcmp
  int_def FMVX_COST            (  100,  1 * DEFAULT_COST);          // shuffles with no conversion
  int_def BRANCH_COST          (  200,  2 * DEFAULT_COST);          // branch, jmp, call
  int_def IMUL_COST            ( 1000, 10 * DEFAULT_COST);          // imul
  int_def IDIVSI_COST          ( 3400, 34 * DEFAULT_COST);          // idivsi
  int_def IDIVDI_COST          ( 6600, 66 * DEFAULT_COST);          // idivdi
  int_def FMUL_SINGLE_COST     (  500,  5 * DEFAULT_COST);          // fmul, fmadd
  int_def FMUL_DOUBLE_COST     (  700,  7 * DEFAULT_COST);          // fmul, fmadd
  int_def FDIV_COST            ( 2000, 20 * DEFAULT_COST);          // fdiv
  int_def FSQRT_COST           ( 2500, 25 * DEFAULT_COST);          // fsqrt
  int_def VOLATILE_REF_COST    ( 1000, 10 * DEFAULT_COST);
  int_def CACHE_MISS_COST      ( 2000, 20 * DEFAULT_COST);          // typical cache miss penalty
%}
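
// The cost names above are referenced from the instruct definitions later in
// this file. As a rough sketch of the usual pattern (illustration only, not a
// rule taken from this section), a simple ALU instruct would carry
//
//   ins_cost(ALU_COST);
//
// while a load form would use LOAD_COST, a branch BRANCH_COST, and so on.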



//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description

source_hpp %{

#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "opto/addnode.hpp"
#include "opto/convertnode.hpp"
#include "runtime/objectMonitor.hpp"

extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
extern RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;

class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};

class HandlerImpl {

 public:

  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count auipc + far branch
    return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
  }
};

class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};

bool is_CAS(int opcode, bool maybe_volatile);

// predicate controlling translation of CompareAndSwapX
bool needs_acquiring_load_reserved(const Node *load);

// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
%}

source %{

// Derived RegMask with conditionally allocatable registers

RegMask _ANY_REG32_mask;
RegMask _ANY_REG_mask;
RegMask _PTR_REG_mask;
RegMask _NO_SPECIAL_REG32_mask;
RegMask _NO_SPECIAL_REG_mask;
RegMask _NO_SPECIAL_PTR_REG_mask;
RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;

void reg_mask_init() {

  _ANY_REG32_mask = _ALL_REG32_mask;
  _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(x0->as_VMReg()));

  _ANY_REG_mask = _ALL_REG_mask;
  _ANY_REG_mask.SUBTRACT(_ZR_REG_mask);

  _PTR_REG_mask = _ALL_REG_mask;
  _PTR_REG_mask.SUBTRACT(_ZR_REG_mask);

  _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
  _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

  _NO_SPECIAL_REG_mask = _ALL_REG_mask;
  _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

  _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
  _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

  // x27 is not allocatable when compressed oops is on
  if (UseCompressedOops) {
    _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
    _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
    _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
  }

  // x8 is not allocatable when PreserveFramePointer is on
  if (PreserveFramePointer) {
    _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
    _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
    _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
  }

  _NO_SPECIAL_NO_FP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
  _NO_SPECIAL_NO_FP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
}

void PhaseOutput::pd_perform_mach_node_analysis() {
}

int MachNode::pd_alignment_required() const {
  return 1;
}

int MachNode::compute_padding(int current_offset) const {
  return 0;
}

// is_CAS(int opcode, bool maybe_volatile)
//
// return true if opcode is one of the possible CompareAndSwapX
// values, otherwise false.
bool is_CAS(int opcode, bool maybe_volatile)
{
  switch (opcode) {
    // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
  }
}

// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load, otherwise false
bool needs_acquiring_load_reserved(const Node *n)
{
  assert(n != nullptr && is_CAS(n->Opcode(), true), "expecting a compare and swap");

  LoadStoreNode* ldst = n->as_LoadStore();
  if (n != nullptr && is_CAS(n->Opcode(), false)) {
    assert(ldst != nullptr && ldst->trailing_membar() != nullptr, "expected trailing membar");
  } else {
    return ldst != nullptr && ldst->trailing_membar() != nullptr;
  }
  // so we can just return true here
  return true;
}
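
// The acquiring CAS/atomic instruct definitions later in this file are
// expected to key off this helper in their predicate, roughly as follows
// (sketch of the usual pattern; the instruct name is illustrative and not
// taken from this section):
//
//   instruct compareAndSwapIAcq(...) %{
//     predicate(needs_acquiring_load_reserved(n));
//     ...
//   %}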
#define __ masm->

// Forward declarations for helper functions to convert register
// indices to register objects

// the ad file has to provide implementations of certain methods
// expected by the generic code
//
// REQUIRED FUNCTIONALITY

//=============================================================================

// !!!!! Special hack to get all types of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.

int MachCallStaticJavaNode::ret_addr_offset()
{
  if (UseTrampolines) {
    return 1 * NativeInstruction::instruction_size; // jal
  }
  return 3 * NativeInstruction::instruction_size; // auipc + ld + jalr
}

int MachCallDynamicJavaNode::ret_addr_offset()
{
  if (UseTrampolines) {
    return NativeMovConstReg::movptr2_instruction_size + NativeInstruction::instruction_size; // movptr2, jal
  }
  return NativeMovConstReg::movptr2_instruction_size + (3 * NativeInstruction::instruction_size); // movptr2, auipc + ld + jalr
}

int MachCallRuntimeNode::ret_addr_offset() {
  // For generated stubs the call will be:
  //   auipc + ld + jalr
  // Using trampolines:
  //   jal(addr)
  // or with far branches
  //   jal(trampoline_stub)
  // For real runtime callouts it will be 11 instructions
  // see riscv_enc_java_to_runtime
  //   la(t1, retaddr)                ->  auipc + addi
  //   la(t0, RuntimeAddress(addr))   ->  lui + addi + slli + addi + slli + addi
  //   addi(sp, sp, -2 * wordSize)    ->  addi
  //   sd(t1, Address(sp, wordSize))  ->  sd
  //   jalr(t0)                       ->  jalr
  CodeBlob *cb = CodeCache::find_blob(_entry_point);
  if (cb != nullptr) {
    if (UseTrampolines) {
      return 1 * NativeInstruction::instruction_size;
    }
    return 3 * NativeInstruction::instruction_size;
  } else {
    return 11 * NativeInstruction::instruction_size;
  }
}

//
// Compute padding required for nodes which need alignment
//

// With RVC a call instruction may get 2-byte aligned.
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const
{
  // to make sure the address of the jal is 4-byte aligned.
  return align_up(current_offset, alignment_required()) - current_offset;
}

// With RVC a call instruction may get 2-byte aligned.
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
  // skip the movptr2 in MacroAssembler::ic_call():
  // lui, lui, slli, add, addi
  // Though movptr2() is already 4-byte aligned with or without RVC,
  // we need to prevent further changes by explicitly calculating the size.
  current_offset += NativeMovConstReg::movptr2_instruction_size;
  // to make sure the address of the jal is 4-byte aligned.
  return align_up(current_offset, alignment_required()) - current_offset;
}

 1307 //=============================================================================
 1308 
 1309 #ifndef PRODUCT
 1310 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1311   assert_cond(st != nullptr);
 1312   st->print("BREAKPOINT");
 1313 }
 1314 #endif
 1315 
 1316 void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1317   __ ebreak();
 1318 }
 1319 
 1320 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
 1321   return MachNode::size(ra_);
 1322 }
 1323 
 1324 //=============================================================================
 1325 
 1326 #ifndef PRODUCT
 1327   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
 1328     st->print("nop \t# %d bytes pad for loops and calls", _count);
 1329   }
 1330 #endif
 1331 
 1332   void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
 1333     Assembler::CompressibleRegion cr(masm); // nops shall be 2-byte under RVC for alignment purposes.
 1334     for (int i = 0; i < _count; i++) {
 1335       __ nop();
 1336     }
 1337   }
 1338 
 1339   uint MachNopNode::size(PhaseRegAlloc*) const {
 1340     return _count * (UseRVC ? NativeInstruction::compressed_instruction_size : NativeInstruction::instruction_size);
 1341   }
 1342 
 1343 //=============================================================================
 1344 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1345 
 1346 int ConstantTable::calculate_table_base_offset() const {
 1347   return 0;  // absolute addressing, no offset
 1348 }
 1349 
 1350 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 1351 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 1352   ShouldNotReachHere();
 1353 }
 1354 
 1355 void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
 1356   // Empty encoding
 1357 }
 1358 
 1359 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 1360   return 0;
 1361 }
 1362 
 1363 #ifndef PRODUCT
 1364 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 1365   assert_cond(st != nullptr);
 1366   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 1367 }
 1368 #endif
 1369 
 1370 #ifndef PRODUCT
 1371 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1372   assert_cond(st != nullptr && ra_ != nullptr);
 1373   Compile* C = ra_->C;
 1374 
 1375   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1376 
 1377   if (C->output()->need_stack_bang(framesize)) {
 1378     st->print("# stack bang size=%d\n\t", framesize);
 1379   }
 1380 
 1381   st->print("sd  fp, [sp, #%d]\n\t", - 2 * wordSize);
 1382   st->print("sd  ra, [sp, #%d]\n\t", - wordSize);
 1383   if (PreserveFramePointer) { st->print("sub  fp, sp, #%d\n\t", 2 * wordSize); }
 1384   st->print("sub sp, sp, #%d\n\t", framesize);
 1385 
 1386   if (C->stub_function() == nullptr && BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr) {
 1387     st->print("ld  t0, [guard]\n\t");
 1388     st->print("membar LoadLoad\n\t");
 1389     st->print("ld  t1, [xthread, #thread_disarmed_guard_value_offset]\n\t");
 1390     st->print("beq t0, t1, skip\n\t");
 1391     st->print("jalr #nmethod_entry_barrier_stub\n\t");
 1392     st->print("j skip\n\t");
 1393     st->print("guard: int\n\t");
 1394     st->print("skip:\n\t");
 1395   }
 1396 }
 1397 #endif
 1398 
 1399 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1400   assert_cond(ra_ != nullptr);
 1401   Compile* C = ra_->C;
 1402 
 1403   // n.b. frame size includes space for return pc and fp
 1404   const int framesize = C->output()->frame_size_in_bytes();
 1405 
 1406   // insert a nop at the start of the prolog so we can patch in a
 1407   // branch if we need to invalidate the method later
 1408   {
 1409     Assembler::IncompressibleRegion ir(masm);  // keep the nop as 4 bytes for patching.
 1410     MacroAssembler::assert_alignment(__ pc());
 1411     __ nop();  // 4 bytes
 1412   }
 1413 
 1414   assert_cond(C != nullptr);
 1415 
 1416   if (C->clinit_barrier_on_entry()) {
 1417     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
 1418 
 1419     Label L_skip_barrier;
 1420 
 1421     __ mov_metadata(t1, C->method()->holder()->constant_encoding());
 1422     __ clinit_barrier(t1, t0, &L_skip_barrier);
 1423     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 1424     __ bind(L_skip_barrier);
 1425   }
 1426 
 1427   int bangsize = C->output()->bang_size_in_bytes();
 1428   if (C->output()->need_stack_bang(bangsize)) {
 1429     __ generate_stack_overflow_check(bangsize);
 1430   }
 1431 
 1432   __ build_frame(framesize);
 1433 
 1434   if (C->stub_function() == nullptr) {
 1435     BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 1436     if (BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr) {
 1437       // Dummy labels for just measuring the code size
 1438       Label dummy_slow_path;
 1439       Label dummy_continuation;
 1440       Label dummy_guard;
 1441       Label* slow_path = &dummy_slow_path;
 1442       Label* continuation = &dummy_continuation;
 1443       Label* guard = &dummy_guard;
 1444       if (!Compile::current()->output()->in_scratch_emit_size()) {
        // Use real labels from the actual stub when not just emitting code to measure its size
 1446         C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
 1447         Compile::current()->output()->add_stub(stub);
 1448         slow_path = &stub->entry();
 1449         continuation = &stub->continuation();
 1450         guard = &stub->guard();
 1451       }
 1452       // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
 1453       bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
 1454     }
 1455   }
 1456 
 1457   if (VerifyStackAtCalls) {
 1458     Unimplemented();
 1459   }
 1460 
 1461   C->output()->set_frame_complete(__ offset());
 1462 
 1463   if (C->has_mach_constant_base_node()) {
 1464     // NOTE: We set the table base offset here because users might be
 1465     // emitted before MachConstantBaseNode.
 1466     ConstantTable& constant_table = C->output()->constant_table();
 1467     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 1468   }
 1469 }
 1470 
 1471 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
 1472 {
 1473   assert_cond(ra_ != nullptr);
 1474   return MachNode::size(ra_); // too many variables; just compute it
 1475                               // the hard way
 1476 }
 1477 
 1478 int MachPrologNode::reloc() const
 1479 {
 1480   return 0;
 1481 }
 1482 
 1483 //=============================================================================
 1484 
 1485 #ifndef PRODUCT
 1486 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1487   assert_cond(st != nullptr && ra_ != nullptr);
 1488   Compile* C = ra_->C;
 1489   assert_cond(C != nullptr);
 1490   int framesize = C->output()->frame_size_in_bytes();
 1491 
 1492   st->print("# pop frame %d\n\t", framesize);
 1493 
 1494   if (framesize == 0) {
 1495     st->print("ld  ra, [sp,#%d]\n\t", (2 * wordSize));
 1496     st->print("ld  fp, [sp,#%d]\n\t", (3 * wordSize));
 1497     st->print("add sp, sp, #%d\n\t", (2 * wordSize));
 1498   } else {
 1499     st->print("add  sp, sp, #%d\n\t", framesize);
 1500     st->print("ld  ra, [sp,#%d]\n\t", - 2 * wordSize);
 1501     st->print("ld  fp, [sp,#%d]\n\t", - wordSize);
 1502   }
 1503 
 1504   if (do_polling() && C->is_method_compilation()) {
 1505     st->print("# test polling word\n\t");
 1506     st->print("ld t0, [xthread,#%d]\n\t", in_bytes(JavaThread::polling_word_offset()));
 1507     st->print("bgtu sp, t0, #slow_path");
 1508   }
 1509 }
 1510 #endif
 1511 
 1512 void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1513   assert_cond(ra_ != nullptr);
 1514   Compile* C = ra_->C;
 1515   assert_cond(C != nullptr);
 1516   int framesize = C->output()->frame_size_in_bytes();
 1517 
 1518   __ remove_frame(framesize);
 1519 
 1520   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
 1521     __ reserved_stack_check();
 1522   }
 1523 
 1524   if (do_polling() && C->is_method_compilation()) {
 1525     Label dummy_label;
 1526     Label* code_stub = &dummy_label;
 1527     if (!C->output()->in_scratch_emit_size()) {
 1528       C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
 1529       C->output()->add_stub(stub);
 1530       code_stub = &stub->entry();
 1531     }
 1532     __ relocate(relocInfo::poll_return_type);
 1533     __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
 1534   }
 1535 }
 1536 
 1537 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1538   assert_cond(ra_ != nullptr);
 1539   // Variable size. Determine dynamically.
 1540   return MachNode::size(ra_);
 1541 }
 1542 
 1543 int MachEpilogNode::reloc() const {
 1544   // Return number of relocatable values contained in this instruction.
 1545   return 1; // 1 for polling page.
 1546 }

const Pipeline * MachEpilogNode::pipeline() const {
 1548   return MachNode::pipeline_class();
 1549 }
 1550 
 1551 //=============================================================================
 1552 
 1553 // Figure out which register class each belongs in: rc_int, rc_float or
 1554 // rc_stack.
 1555 enum RC { rc_bad, rc_int, rc_float, rc_vector, rc_stack };
 1556 
 1557 static enum RC rc_class(OptoReg::Name reg) {
 1558 
 1559   if (reg == OptoReg::Bad) {
 1560     return rc_bad;
 1561   }
 1562 
 1563   // we have 30 int registers * 2 halves
 1564   // (t0 and t1 are omitted)
 1565   int slots_of_int_registers = Register::max_slots_per_register * (Register::number_of_registers - 2);
 1566   if (reg < slots_of_int_registers) {
 1567     return rc_int;
 1568   }
 1569 
  // we have 32 float registers * 2 halves
 1571   int slots_of_float_registers = FloatRegister::max_slots_per_register * FloatRegister::number_of_registers;
 1572   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1573     return rc_float;
 1574   }
 1575 
  // we have 32 vector registers * 4 slots
 1577   int slots_of_vector_registers = VectorRegister::max_slots_per_register * VectorRegister::number_of_registers;
 1578   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) {
 1579     return rc_vector;
 1580   }
 1581 
 1582   // Between vector regs & stack is the flags regs.
 1583   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1584 
 1585   return rc_stack;
 1586 }
 1587 
 1588 uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1589   assert_cond(ra_ != nullptr);
 1590   Compile* C = ra_->C;
 1591 
 1592   // Get registers to move.
 1593   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1594   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1595   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1596   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1597 
 1598   enum RC src_hi_rc = rc_class(src_hi);
 1599   enum RC src_lo_rc = rc_class(src_lo);
 1600   enum RC dst_hi_rc = rc_class(dst_hi);
 1601   enum RC dst_lo_rc = rc_class(dst_lo);
 1602 
 1603   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1604 
 1605   if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
 1606     assert((src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1607            (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi,
 1608            "expected aligned-adjacent pairs");
 1609   }
 1610 
 1611   if (src_lo == dst_lo && src_hi == dst_hi) {
 1612     return 0;            // Self copy, no move.
 1613   }
 1614 
 1615   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1616               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1617   int src_offset = ra_->reg2offset(src_lo);
 1618   int dst_offset = ra_->reg2offset(dst_lo);
 1619 
 1620   if (bottom_type()->isa_vect() != nullptr) {
 1621     uint ireg = ideal_reg();
 1622     if (ireg == Op_VecA && masm) {
 1623       int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 1624       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1625         // stack to stack
 1626         __ spill_copy_vector_stack_to_stack(src_offset, dst_offset,
 1627                                             vector_reg_size_in_bytes);
 1628       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
 1629         // vpr to stack
 1630         __ spill(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
 1631       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
 1632         // stack to vpr
 1633         __ unspill(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
 1634       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
 1635         // vpr to vpr
 1636         __ vmv1r_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
 1637       } else {
 1638         ShouldNotReachHere();
 1639       }
 1640     } else if (bottom_type()->isa_vectmask() && masm) {
 1641       int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8;
 1642       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1643         // stack to stack
 1644         __ spill_copy_vmask_stack_to_stack(src_offset, dst_offset,
 1645                                            vmask_size_in_bytes);
 1646       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
 1647         // vmask to stack
 1648         __ spill_vmask(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
 1649       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
 1650         // stack to vmask
 1651         __ unspill_vmask(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
 1652       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
 1653         // vmask to vmask
 1654         __ vmv1r_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
 1655       } else {
 1656         ShouldNotReachHere();
 1657       }
 1658     }
 1659   } else if (masm != nullptr) {
 1660     switch (src_lo_rc) {
 1661       case rc_int:
 1662         if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 1663           if (!is64 && this->ideal_reg() != Op_RegI) { // zero extended for narrow oop or klass
 1664             __ zero_extend(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]), 32);
 1665           } else {
 1666             __ mv(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]));
 1667           }
 1668         } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 1669           if (is64) {
 1670             __ fmv_d_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1671                        as_Register(Matcher::_regEncode[src_lo]));
 1672           } else {
 1673             __ fmv_w_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1674                        as_Register(Matcher::_regEncode[src_lo]));
 1675           }
 1676         } else {                    // gpr --> stack spill
 1677           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1678           __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 1679         }
 1680         break;
 1681       case rc_float:
 1682         if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 1683           if (is64) {
 1684             __ fmv_x_d(as_Register(Matcher::_regEncode[dst_lo]),
 1685                        as_FloatRegister(Matcher::_regEncode[src_lo]));
 1686           } else {
 1687             __ fmv_x_w(as_Register(Matcher::_regEncode[dst_lo]),
 1688                        as_FloatRegister(Matcher::_regEncode[src_lo]));
 1689           }
 1690         } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 1691           if (is64) {
 1692             __ fmv_d(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1693                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1694           } else {
 1695             __ fmv_s(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1696                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1697           }
 1698         } else {                    // fpr --> stack spill
 1699           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1700           __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 1701                    is64, dst_offset);
 1702         }
 1703         break;
 1704       case rc_stack:
 1705         if (dst_lo_rc == rc_int) {  // stack --> gpr load
 1706           if (this->ideal_reg() == Op_RegI) {
 1707             __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
          } else { // zero extended for narrow oop or klass
 1709             __ unspillu(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 1710           }
 1711         } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 1712           __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1713                      is64, src_offset);
 1714         } else {                    // stack --> stack copy
 1715           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1716           if (this->ideal_reg() == Op_RegI) {
 1717             __ unspill(t0, is64, src_offset);
 1718           } else { // zero extended for narrow oop or klass
 1719             __ unspillu(t0, is64, src_offset);
 1720           }
 1721           __ spill(t0, is64, dst_offset);
 1722         }
 1723         break;
 1724       default:
 1725         ShouldNotReachHere();
 1726     }
 1727   }
 1728 
 1729   if (st != nullptr) {
 1730     st->print("spill ");
 1731     if (src_lo_rc == rc_stack) {
 1732       st->print("[sp, #%d] -> ", src_offset);
 1733     } else {
 1734       st->print("%s -> ", Matcher::regName[src_lo]);
 1735     }
 1736     if (dst_lo_rc == rc_stack) {
 1737       st->print("[sp, #%d]", dst_offset);
 1738     } else {
 1739       st->print("%s", Matcher::regName[dst_lo]);
 1740     }
 1741     if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 1742       int vsize = 0;
 1743       if (ideal_reg() == Op_VecA) {
 1744         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 1745       } else {
 1746         ShouldNotReachHere();
 1747       }
 1748       st->print("\t# vector spill size = %d", vsize);
 1749     } else if (ideal_reg() == Op_RegVectMask) {
 1750       assert(Matcher::supports_scalable_vector(), "bad register type for spill");
 1751       int vsize = Matcher::scalable_predicate_reg_slots() * 32;
 1752       st->print("\t# vmask spill size = %d", vsize);
 1753     } else {
 1754       st->print("\t# spill size = %d", is64 ? 64 : 32);
 1755     }
 1756   }
 1757 
 1758   return 0;
 1759 }
 1760 
 1761 #ifndef PRODUCT
 1762 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1763   if (ra_ == nullptr) {
 1764     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
 1765   } else {
 1766     implementation(nullptr, ra_, false, st);
 1767   }
 1768 }
 1769 #endif
 1770 
 1771 void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1772   implementation(masm, ra_, false, nullptr);
 1773 }
 1774 
 1775 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
 1776   return MachNode::size(ra_);
 1777 }
 1778 
 1779 //=============================================================================
 1780 
 1781 #ifndef PRODUCT
 1782 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1783   assert_cond(ra_ != nullptr && st != nullptr);
 1784   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1785   int reg = ra_->get_reg_first(this);
 1786   st->print("add %s, sp, #%d\t# box lock",
 1787             Matcher::regName[reg], offset);
 1788 }
 1789 #endif
 1790 
 1791 void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1792   Assembler::IncompressibleRegion ir(masm);  // Fixed length: see BoxLockNode::size()
 1793 
 1794   assert_cond(ra_ != nullptr);
 1795   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1796   int reg    = ra_->get_encode(this);
 1797 
 1798   if (Assembler::is_simm12(offset)) {
 1799     __ addi(as_Register(reg), sp, offset);
 1800   } else {
 1801     __ li32(t0, offset);
 1802     __ add(as_Register(reg), sp, t0);
 1803   }
 1804 }
 1805 
 1806 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
 1807   // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
 1808   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1809 
 1810   if (Assembler::is_simm12(offset)) {
 1811     return NativeInstruction::instruction_size;
 1812   } else {
 1813     return 3 * NativeInstruction::instruction_size; // lui + addiw + add;
 1814   }
 1815 }
 1816 
 1817 //=============================================================================
 1818 
 1819 #ifndef PRODUCT
 1820 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 1821 {
 1822   assert_cond(st != nullptr);
 1823   st->print_cr("# MachUEPNode");
 1824   if (UseCompressedClassPointers) {
 1825     st->print_cr("\tlwu t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 1826     st->print_cr("\tlwu t2, [t1      + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 1827   } else {
 1828     st->print_cr("\tld t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 1829     st->print_cr("\tld t2, [t1      + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 1830   }
 1831   st->print_cr("\tbeq t0, t2, ic_hit");
 1832   st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
 1833   st->print_cr("\tic_hit:");
 1834 }
 1835 #endif
 1836 
 1837 void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
 1838 {
 1839   // This is the unverified entry point.
 1840   __ ic_check(CodeEntryAlignment);
 1841 
  // Verified entry point must be 4-byte aligned for patching by NativeJump::patch_verified_entry().
  // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment (min 16) > NativeInstruction::instruction_size (4).
 1844   assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
 1845 }
 1846 
 1847 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 1848 {
 1849   assert_cond(ra_ != nullptr);
 1850   return MachNode::size(ra_);
 1851 }
 1852 
 1853 // REQUIRED EMIT CODE
 1854 
 1855 //=============================================================================
 1856 
 1857 // Emit exception handler code.
 1858 int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
 1859 {
 1860   // auipc t0, #exception_blob_entry_point
 1861   // jr (offset)t0
 1862   // Note that the code buffer's insts_mark is always relative to insts.
 1863   // That's why we must use the macroassembler to generate a handler.
 1864   address base = __ start_a_stub(size_exception_handler());
 1865   if (base == nullptr) {
 1866     ciEnv::current()->record_failure("CodeCache is full");
 1867     return 0;  // CodeBuffer::expand failed
 1868   }
 1869   int offset = __ offset();
 1870   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 1871   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 1872   __ end_a_stub();
 1873   return offset;
 1874 }
 1875 
 1876 // Emit deopt handler code.
 1877 int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
 1878 {
 1879   address base = __ start_a_stub(size_deopt_handler());
 1880   if (base == nullptr) {
 1881     ciEnv::current()->record_failure("CodeCache is full");
 1882     return 0;  // CodeBuffer::expand failed
 1883   }
 1884   int offset = __ offset();
 1885 
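  // auipc ra, 0 materializes the address of this handler (current pc) in ra
  // before the far jump to the deopt blob's unpack entry.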
 1886   __ auipc(ra, 0);
 1887   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 1888 
 1889   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
 1890   __ end_a_stub();
  return offset;
}

// REQUIRED MATCHER CODE
 1895 
 1896 //=============================================================================
 1897 
 1898 bool Matcher::match_rule_supported(int opcode) {
 1899   if (!has_match_rule(opcode)) {
 1900     return false;
 1901   }
 1902 
 1903   switch (opcode) {
 1904     case Op_OnSpinWait:
 1905       return VM_Version::supports_on_spin_wait();
 1906     case Op_CacheWB:           // fall through
 1907     case Op_CacheWBPreSync:    // fall through
 1908     case Op_CacheWBPostSync:
 1909       if (!VM_Version::supports_data_cache_line_flush()) {
 1910         return false;
 1911       }
 1912       break;
 1913 
 1914     case Op_ExpandBits:        // fall through
 1915     case Op_CompressBits:      // fall through
 1916       guarantee(UseRVV == (MaxVectorSize >= 16), "UseRVV and MaxVectorSize not matched");
 1917     case Op_StrCompressedCopy: // fall through
 1918     case Op_StrInflatedCopy:   // fall through
 1919     case Op_CountPositives:    // fall through
 1920     case Op_EncodeISOArray:
 1921       return UseRVV;
 1922 
    // Current tests show a performance gain when MaxVectorSize >= 32, but a regression when
    // MaxVectorSize == 16. So only enable the intrinsic when MaxVectorSize >= 32.
 1925     case Op_RoundVF:
 1926       return UseRVV && MaxVectorSize >= 32;
 1927 
    // For double, current tests still show some regression even with MaxVectorSize == 32. Although
    // there is no hardware to verify it for now, the trend of the performance data on hardware
    // (with vlenb == 16 and 32 respectively) suggests better performance rather than a regression
    // for double once MaxVectorSize reaches 64. So only enable the intrinsic when MaxVectorSize >= 64.
 1932     case Op_RoundVD:
 1933       return UseRVV && MaxVectorSize >= 64;
 1934 
 1935     case Op_PopCountI:
 1936     case Op_PopCountL:
 1937       return UsePopCountInstruction;
 1938 
 1939     case Op_ReverseBytesI:
 1940     case Op_ReverseBytesL:
 1941     case Op_ReverseBytesS:
 1942     case Op_ReverseBytesUS:
 1943     case Op_RotateRight:
 1944     case Op_RotateLeft:
 1945     case Op_CountLeadingZerosI:
 1946     case Op_CountLeadingZerosL:
 1947     case Op_CountTrailingZerosI:
 1948     case Op_CountTrailingZerosL:
 1949       return UseZbb;
 1950 
 1951     case Op_FmaF:
 1952     case Op_FmaD:
 1953     case Op_FmaVF:
 1954     case Op_FmaVD:
 1955       return UseFMA;
 1956 
 1957     case Op_ConvHF2F:
 1958     case Op_ConvF2HF:
 1959       return UseZfh;
 1960   }
 1961 
 1962   return true; // Per default match rules are supported.
 1963 }
 1964 
 1965 const RegMask* Matcher::predicate_reg_mask(void) {
 1966   return &_VMASK_REG_mask;
 1967 }
 1968 
 1969 const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
 1970   return new TypeVectMask(elemTy, length);
 1971 }
 1972 
 1973 // Vector calling convention not yet implemented.
 1974 bool Matcher::supports_vector_calling_convention(void) {
 1975   return false;
 1976 }
 1977 
 1978 OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
 1979   Unimplemented();
 1980   return OptoRegPair(0, 0);
 1981 }
 1982 
 1983 // Is this branch offset short enough that a short branch can be used?
 1984 //
 1985 // NOTE: If the platform does not provide any short branch variants, then
 1986 //       this method should return false for offset 0.
 1987 // |---label(L1)-----|
 1988 // |-----------------|
 1989 // |-----------------|----------eq: float-------------------
 1990 // |-----------------| // far_cmpD_branch   |   cmpD_branch
 1991 // |------- ---------|    feq;              |      feq;
 1992 // |-far_cmpD_branch-|    beqz done;        |      bnez L;
 1993 // |-----------------|    j L;              |
 1994 // |-----------------|    bind(done);       |
 1995 // |-----------------|--------------------------------------
 1996 // |-----------------| // so shortBrSize = br_size - 4;
 1997 // |-----------------| // so offs = offset - shortBrSize + 4;
 1998 // |---label(L2)-----|
 1999 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2000   // The passed offset is relative to address of the branch.
 2001   int shortBrSize = br_size - 4;
 2002   int offs = offset - shortBrSize + 4;
 2003   return (-4096 <= offs && offs < 4096);
 2004 }
 2005 
 2006 // Vector width in bytes.
 2007 int Matcher::vector_width_in_bytes(BasicType bt) {
 2008   if (UseRVV) {
    // MaxVectorSize should have been set from the detected RVV maximum vector register size when UseRVV was checked.
 2010     // MaxVectorSize == VM_Version::_initial_vector_length
 2011     int size = MaxVectorSize;
 2012     // Minimum 2 values in vector
 2013     if (size < 2 * type2aelembytes(bt)) size = 0;
 2014     // But never < 4
 2015     if (size < 4) size = 0;
 2016     return size;
 2017   }
 2018   return 0;
 2019 }
 2020 
 2021 // Limits on vector size (number of elements) loaded into vector.
 2022 int Matcher::max_vector_size(const BasicType bt) {
 2023   return vector_width_in_bytes(bt) / type2aelembytes(bt);
 2024 }
 2025 
 2026 int Matcher::min_vector_size(const BasicType bt) {
 2027   int max_size = max_vector_size(bt);
 2028   // Limit the min vector size to 8 bytes.
 2029   int size = 8 / type2aelembytes(bt);
 2030   if (bt == T_BYTE) {
 2031     // To support vector api shuffle/rearrange.
 2032     size = 4;
 2033   } else if (bt == T_BOOLEAN) {
 2034     // To support vector api load/store mask.
 2035     size = 2;
 2036   }
 2037   if (size < 2) size = 2;
 2038   return MIN2(size, max_size);
 2039 }
 2040 
 2041 int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
 2042   return Matcher::max_vector_size(bt);
 2043 }
 2044 
 2045 // Vector ideal reg.
 2046 uint Matcher::vector_ideal_reg(int len) {
 2047   assert(MaxVectorSize >= len, "");
 2048   if (UseRVV) {
 2049     return Op_VecA;
 2050   }
 2051 
 2052   ShouldNotReachHere();
 2053   return 0;
 2054 }
 2055 
 2056 int Matcher::scalable_vector_reg_size(const BasicType bt) {
 2057   return Matcher::max_vector_size(bt);
 2058 }
 2059 
 2060 MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
 2061   ShouldNotReachHere(); // generic vector operands not supported
 2062   return nullptr;
 2063 }
 2064 
 2065 bool Matcher::is_reg2reg_move(MachNode* m) {
 2066   ShouldNotReachHere(); // generic vector operands not supported
 2067   return false;
 2068 }
 2069 
 2070 bool Matcher::is_generic_vector(MachOper* opnd) {
 2071   ShouldNotReachHere(); // generic vector operands not supported
 2072   return false;
 2073 }
 2074 
 2075 // Return whether or not this register is ever used as an argument.
 2076 // This function is used on startup to build the trampoline stubs in
 2077 // generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers will not be
 2079 // available to the callee.
 2080 bool Matcher::can_be_java_arg(int reg)
 2081 {
 2082   return
 2083     reg ==  R10_num || reg == R10_H_num ||
 2084     reg ==  R11_num || reg == R11_H_num ||
 2085     reg ==  R12_num || reg == R12_H_num ||
 2086     reg ==  R13_num || reg == R13_H_num ||
 2087     reg ==  R14_num || reg == R14_H_num ||
 2088     reg ==  R15_num || reg == R15_H_num ||
 2089     reg ==  R16_num || reg == R16_H_num ||
 2090     reg ==  R17_num || reg == R17_H_num ||
 2091     reg ==  F10_num || reg == F10_H_num ||
 2092     reg ==  F11_num || reg == F11_H_num ||
 2093     reg ==  F12_num || reg == F12_H_num ||
 2094     reg ==  F13_num || reg == F13_H_num ||
 2095     reg ==  F14_num || reg == F14_H_num ||
 2096     reg ==  F15_num || reg == F15_H_num ||
 2097     reg ==  F16_num || reg == F16_H_num ||
 2098     reg ==  F17_num || reg == F17_H_num;
 2099 }
 2100 
 2101 bool Matcher::is_spillable_arg(int reg)
 2102 {
 2103   return can_be_java_arg(reg);
 2104 }
 2105 
 2106 uint Matcher::int_pressure_limit()
 2107 {
 2108   // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics (Spill-USE) explicitly skip
 2110   // derived pointers and lastly fail to spill after reaching maximum
 2111   // number of iterations. Lowering the default pressure threshold to
 2112   // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
 2113   // a high register pressure area of the code so that split_DEF can
 2114   // generate DefinitionSpillCopy for the derived pointer.
 2115   uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
 2116   if (!PreserveFramePointer) {
 2117     // When PreserveFramePointer is off, frame pointer is allocatable,
 2118     // but different from other SOC registers, it is excluded from
 2119     // fatproj's mask because its save type is No-Save. Decrease 1 to
 2120     // ensure high pressure at fatproj when PreserveFramePointer is off.
 2121     // See check_pressure_at_fatproj().
 2122     default_int_pressure_threshold--;
 2123   }
 2124   return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
 2125 }
 2126 
 2127 uint Matcher::float_pressure_limit()
 2128 {
 2129   // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
 2130   return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
 2131 }
 2132 
 2133 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
 2134   return false;
 2135 }
 2136 
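// Register for DIVI projection of divmodI.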
 2137 RegMask Matcher::divI_proj_mask() {
 2138   ShouldNotReachHere();
 2139   return RegMask();
 2140 }
 2141 
 2142 // Register for MODI projection of divmodI.
 2143 RegMask Matcher::modI_proj_mask() {
 2144   ShouldNotReachHere();
 2145   return RegMask();
 2146 }
 2147 
 2148 // Register for DIVL projection of divmodL.
 2149 RegMask Matcher::divL_proj_mask() {
 2150   ShouldNotReachHere();
 2151   return RegMask();
 2152 }
 2153 
 2154 // Register for MODL projection of divmodL.
 2155 RegMask Matcher::modL_proj_mask() {
 2156   ShouldNotReachHere();
 2157   return RegMask();
 2158 }
 2159 
 2160 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
 2161   return FP_REG_mask();
 2162 }
 2163 
 2164 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
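  // Returns true only if every memory user of this AddP node accesses exactly
  // (1 << shift) bytes.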
 2165   assert_cond(addp != nullptr);
 2166   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2167     Node* u = addp->fast_out(i);
 2168     if (u != nullptr && u->is_Mem()) {
 2169       int opsize = u->as_Mem()->memory_size();
 2170       assert(opsize > 0, "unexpected memory operand size");
 2171       if (u->as_Mem()->memory_size() != (1 << shift)) {
 2172         return false;
 2173       }
 2174     }
 2175   }
 2176   return true;
 2177 }
 2178 
 2179 // Binary src (Replicate scalar/immediate)
 2180 static bool is_vector_scalar_bitwise_pattern(Node* n, Node* m) {
 2181   if (n == nullptr || m == nullptr) {
 2182     return false;
 2183   }
 2184 
 2185   if (m->Opcode() != Op_Replicate) {
 2186     return false;
 2187   }
 2188 
 2189   switch (n->Opcode()) {
 2190     case Op_AndV:
 2191     case Op_OrV:
 2192     case Op_XorV:
 2193     case Op_AddVB:
 2194     case Op_AddVS:
 2195     case Op_AddVI:
 2196     case Op_AddVL:
 2197     case Op_SubVB:
 2198     case Op_SubVS:
 2199     case Op_SubVI:
 2200     case Op_SubVL:
 2201     case Op_MulVB:
 2202     case Op_MulVS:
 2203     case Op_MulVI:
 2204     case Op_MulVL: {
 2205       return true;
 2206     }
 2207     default:
 2208       return false;
 2209   }
 2210 }
 2211 
 2212 // (XorV src (Replicate m1))
 2213 // (XorVMask src (MaskAll m1))
 2214 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2215   if (n != nullptr && m != nullptr) {
 2216     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2217            VectorNode::is_all_ones_vector(m);
 2218   }
 2219   return false;
 2220 }
 2221 
 2222 // Should the Matcher clone input 'm' of node 'n'?
 2223 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2224   assert_cond(m != nullptr);
 2225   if (is_vshift_con_pattern(n, m) || // ShiftV src (ShiftCntV con)
 2226       is_vector_bitwise_not_pattern(n, m) ||
 2227       is_vector_scalar_bitwise_pattern(n, m) ||
 2228       is_encode_and_store_pattern(n, m)) {
 2229     mstack.push(m, Visit);
 2230     return true;
 2231   }
 2232   return false;
 2233 }
 2234 
 2235 // Should the Matcher clone shifts on addressing modes, expecting them
 2236 // to be subsumed into complex addressing expressions or compute them
 2237 // into registers?
 2238 bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
 2239   return clone_base_plus_offset_address(m, mstack, address_visited);
 2240 }
 2241 
 2242 %}
 2243 
 2244 
 2245 
 2246 //----------ENCODING BLOCK-----------------------------------------------------
 2247 // This block specifies the encoding classes used by the compiler to
 2248 // output byte streams.  Encoding classes are parameterized macros
 2249 // used by Machine Instruction Nodes in order to generate the bit
 2250 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are currently
// supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
 2254 // which returns its register number when queried.  CONST_INTER causes
 2255 // an operand to generate a function which returns the value of the
 2256 // constant when queried.  MEMORY_INTER causes an operand to generate
 2257 // four functions which return the Base Register, the Index Register,
 2258 // the Scale Value, and the Offset Value of the operand when queried.
 2259 // COND_INTER causes an operand to generate six functions which return
 2260 // the encoding code (ie - encoding bits for the instruction)
 2261 // associated with each basic boolean condition for a conditional
 2262 // instruction.
 2263 //
 2264 // Instructions specify two basic values for encoding.  Again, a
 2265 // function is available to check if the constant displacement is an
 2266 // oop. They use the ins_encode keyword to specify their encoding
 2267 // classes (which must be a sequence of enc_class names, and their
 2268 // parameters, specified in the encoding block), and they use the
 2269 // opcode keyword to specify, in order, their primary, secondary, and
 2270 // tertiary opcode.  Only the opcode sections which a particular
 2271 // instruction needs for encoding need to be specified.
 2272 encode %{
 2273   // BEGIN Non-volatile memory access
 2274 
 2275   enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
 2276     int64_t con = (int64_t)$src$$constant;
 2277     Register dst_reg = as_Register($dst$$reg);
 2278     __ mv(dst_reg, con);
 2279   %}
 2280 
 2281   enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
 2282     Register dst_reg = as_Register($dst$$reg);
 2283     address con = (address)$src$$constant;
 2284     if (con == nullptr || con == (address)1) {
 2285       ShouldNotReachHere();
 2286     } else {
 2287       relocInfo::relocType rtype = $src->constant_reloc();
 2288       if (rtype == relocInfo::oop_type) {
 2289         __ movoop(dst_reg, (jobject)con);
 2290       } else if (rtype == relocInfo::metadata_type) {
 2291         __ mov_metadata(dst_reg, (Metadata*)con);
 2292       } else {
 2293         assert(rtype == relocInfo::none, "unexpected reloc type");
 2294         __ mv(dst_reg, $src$$constant);
 2295       }
 2296     }
 2297   %}
 2298 
 2299   enc_class riscv_enc_mov_p1(iRegP dst) %{
 2300     Register dst_reg = as_Register($dst$$reg);
 2301     __ mv(dst_reg, 1);
 2302   %}
 2303 
 2304   enc_class riscv_enc_mov_byte_map_base(iRegP dst) %{
 2305     __ load_byte_map_base($dst$$Register);
 2306   %}
 2307 
 2308   enc_class riscv_enc_mov_n(iRegN dst, immN src) %{
 2309     Register dst_reg = as_Register($dst$$reg);
 2310     address con = (address)$src$$constant;
 2311     if (con == nullptr) {
 2312       ShouldNotReachHere();
 2313     } else {
 2314       relocInfo::relocType rtype = $src->constant_reloc();
 2315       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
 2316       __ set_narrow_oop(dst_reg, (jobject)con);
 2317     }
 2318   %}
 2319 
 2320   enc_class riscv_enc_mov_zero(iRegNorP dst) %{
 2321     Register dst_reg = as_Register($dst$$reg);
 2322     __ mv(dst_reg, zr);
 2323   %}
 2324 
 2325   enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{
 2326     Register dst_reg = as_Register($dst$$reg);
 2327     address con = (address)$src$$constant;
 2328     if (con == nullptr) {
 2329       ShouldNotReachHere();
 2330     } else {
 2331       relocInfo::relocType rtype = $src->constant_reloc();
 2332       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
 2333       __ set_narrow_klass(dst_reg, (Klass *)con);
 2334     }
 2335   %}
 2336 
 2337   enc_class riscv_enc_cmpxchgw(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2338     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 2339                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2340                /*result as bool*/ true);
 2341   %}
 2342 
 2343   enc_class riscv_enc_cmpxchgn(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2344     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 2345                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2346                /*result as bool*/ true);
 2347   %}
 2348 
 2349   enc_class riscv_enc_cmpxchg(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
 2350     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 2351                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2352                /*result as bool*/ true);
 2353   %}
 2354 
 2355   enc_class riscv_enc_cmpxchgw_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2356     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 2357                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2358                /*result as bool*/ true);
 2359   %}
 2360 
 2361   enc_class riscv_enc_cmpxchgn_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2362     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 2363                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2364                /*result as bool*/ true);
 2365   %}
 2366 
 2367   enc_class riscv_enc_cmpxchg_acq(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
 2368     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 2369                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2370                /*result as bool*/ true);
 2371   %}
 2372 
 2373   // compare and branch instruction encodings
 2374 
 2375   enc_class riscv_enc_j(label lbl) %{
 2376     Label* L = $lbl$$label;
 2377     __ j(*L);
 2378   %}
 2379 
 2380   enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
 2381     Label* L = $lbl$$label;
 2382     switch ($cmp$$cmpcode) {
 2383       case(BoolTest::ge):
 2384         __ j(*L);
 2385         break;
 2386       case(BoolTest::lt):
 2387         break;
 2388       default:
 2389         Unimplemented();
 2390     }
 2391   %}
 2392 
 2393   // call instruction encodings
 2394 
 2395   enc_class riscv_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result) %{
 2396     Register sub_reg = as_Register($sub$$reg);
 2397     Register super_reg = as_Register($super$$reg);
 2398     Register temp_reg = as_Register($temp$$reg);
 2399     Register result_reg = as_Register($result$$reg);
 2400     Register cr_reg = t1;
 2401 
 2402     Label miss;
 2403     Label done;
 2404     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
 2405                                      nullptr, &miss);
 2406     if ($primary) {
 2407       __ mv(result_reg, zr);
 2408     } else {
 2409       __ mv(cr_reg, zr);
 2410       __ j(done);
 2411     }
 2412 
 2413     __ bind(miss);
 2414     if (!$primary) {
 2415       __ mv(cr_reg, 1);
 2416     }
 2417 
 2418     __ bind(done);
 2419   %}
 2420 
 2421   enc_class riscv_enc_java_static_call(method meth) %{
 2422     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2423 
 2424     address addr = (address)$meth$$method;
 2425     address call = nullptr;
 2426     assert_cond(addr != nullptr);
 2427     if (!_method) {
 2428       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
 2429       call = __ reloc_call(Address(addr, relocInfo::runtime_call_type));
 2430       if (call == nullptr) {
 2431         ciEnv::current()->record_failure("CodeCache is full");
 2432         return;
 2433       }
 2434     } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
 2435       // The NOP here is purely to ensure that eliding a call to
 2436       // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
 2437       __ nop();
 2438       if (!UseTrampolines) {
 2439         __ nop();
 2440         __ nop();
 2441       }
 2442       __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
 2443     } else {
 2444       int method_index = resolved_method_index(masm);
 2445       RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
 2446                                                   : static_call_Relocation::spec(method_index);
 2447       call = __ reloc_call(Address(addr, rspec));
 2448       if (call == nullptr) {
 2449         ciEnv::current()->record_failure("CodeCache is full");
 2450         return;
 2451       }
 2452 
 2453       if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
 2454         // Calls of the same statically bound method can share
 2455         // a stub to the interpreter.
 2456         __ code()->shared_stub_to_interp_for(_method, call - (__ begin()));
 2457       } else {
 2458         // Emit stub for static call
 2459         address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
 2460         if (stub == nullptr) {
 2461           ciEnv::current()->record_failure("CodeCache is full");
 2462           return;
 2463         }
 2464       }
 2465     }
 2466 
 2467     __ post_call_nop();
 2468   %}
 2469 
 2470   enc_class riscv_enc_java_dynamic_call(method meth) %{
 2471     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2472     int method_index = resolved_method_index(masm);
 2473     address call = __ ic_call((address)$meth$$method, method_index);
 2474     if (call == nullptr) {
 2475       ciEnv::current()->record_failure("CodeCache is full");
 2476       return;
 2477     }
 2478 
 2479     __ post_call_nop();
 2480   %}
 2481 
 2482   enc_class riscv_enc_call_epilog() %{
 2483     if (VerifyStackAtCalls) {
 2484       // Check that stack depth is unchanged: find majik cookie on stack
 2485       __ call_Unimplemented();
 2486     }
 2487   %}
 2488 
 2489   enc_class riscv_enc_java_to_runtime(method meth) %{
 2490     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2491 
    // Some calls to generated routines (arraycopy code) are scheduled by C2
    // as runtime calls. If so, we can call them using a jr (they will be in
    // a reachable segment); otherwise we have to use a jalr, which loads the
    // absolute address into a register.
 2496     address entry = (address)$meth$$method;
 2497     CodeBlob *cb = CodeCache::find_blob(entry);
 2498     if (cb != nullptr) {
 2499       address call = __ reloc_call(Address(entry, relocInfo::runtime_call_type));
 2500       if (call == nullptr) {
 2501         ciEnv::current()->record_failure("CodeCache is full");
 2502         return;
 2503       }
 2504       __ post_call_nop();
 2505     } else {
 2506       Label retaddr;
 2507       __ la(t1, retaddr);
 2508       __ la(t0, RuntimeAddress(entry));
 2509       // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
 2510       __ addi(sp, sp, -2 * wordSize);
 2511       __ sd(t1, Address(sp, wordSize));
 2512       __ jalr(t0);
 2513       __ bind(retaddr);
 2514       __ post_call_nop();
 2515       __ addi(sp, sp, 2 * wordSize);
 2516     }
 2517   %}
 2518 
 2519   // arithmetic encodings
 2520 
 2521   enc_class riscv_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
 2522     Register dst_reg = as_Register($dst$$reg);
 2523     Register src1_reg = as_Register($src1$$reg);
 2524     Register src2_reg = as_Register($src2$$reg);
 2525     __ corrected_idivl(dst_reg, src1_reg, src2_reg, /* want_remainder */ false, /* is_signed */ true);
 2526   %}
 2527 
 2528   enc_class riscv_enc_divuw(iRegI dst, iRegI src1, iRegI src2) %{
 2529     Register dst_reg = as_Register($dst$$reg);
 2530     Register src1_reg = as_Register($src1$$reg);
 2531     Register src2_reg = as_Register($src2$$reg);
 2532     __ corrected_idivl(dst_reg, src1_reg, src2_reg, /* want_remainder */ false, /* is_signed */ false);
 2533   %}
 2534 
 2535   enc_class riscv_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
 2536     Register dst_reg = as_Register($dst$$reg);
 2537     Register src1_reg = as_Register($src1$$reg);
 2538     Register src2_reg = as_Register($src2$$reg);
 2539     __ corrected_idivq(dst_reg, src1_reg, src2_reg, /* want_remainder */ false, /* is_signed */ true);
 2540   %}
 2541 
 2542   enc_class riscv_enc_divu(iRegI dst, iRegI src1, iRegI src2) %{
 2543     Register dst_reg = as_Register($dst$$reg);
 2544     Register src1_reg = as_Register($src1$$reg);
 2545     Register src2_reg = as_Register($src2$$reg);
 2546     __ corrected_idivq(dst_reg, src1_reg, src2_reg, /* want_remainder */ false, /* is_signed */ false);
 2547   %}
 2548 
 2549   enc_class riscv_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
 2550     Register dst_reg = as_Register($dst$$reg);
 2551     Register src1_reg = as_Register($src1$$reg);
 2552     Register src2_reg = as_Register($src2$$reg);
 2553     __ corrected_idivl(dst_reg, src1_reg, src2_reg, /* want_remainder */ true, /* is_signed */ true);
 2554   %}
 2555 
 2556   enc_class riscv_enc_moduw(iRegI dst, iRegI src1, iRegI src2) %{
 2557     Register dst_reg = as_Register($dst$$reg);
 2558     Register src1_reg = as_Register($src1$$reg);
 2559     Register src2_reg = as_Register($src2$$reg);
 2560     __ corrected_idivl(dst_reg, src1_reg, src2_reg, /* want_remainder */ true, /* is_signed */ false);
 2561   %}
 2562 
 2563   enc_class riscv_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
 2564     Register dst_reg = as_Register($dst$$reg);
 2565     Register src1_reg = as_Register($src1$$reg);
 2566     Register src2_reg = as_Register($src2$$reg);
 2567     __ corrected_idivq(dst_reg, src1_reg, src2_reg, /* want_remainder */ true, /* is_signed */ true);
 2568   %}
 2569 
 2570   enc_class riscv_enc_modu(iRegI dst, iRegI src1, iRegI src2) %{
 2571     Register dst_reg = as_Register($dst$$reg);
 2572     Register src1_reg = as_Register($src1$$reg);
 2573     Register src2_reg = as_Register($src2$$reg);
 2574     __ corrected_idivq(dst_reg, src1_reg, src2_reg, /* want_remainder */ true, /* is_signed */ false);
 2575   %}
 2576 
 2577   enc_class riscv_enc_tail_call(iRegP jump_target) %{
 2578     Register target_reg = as_Register($jump_target$$reg);
 2579     __ jr(target_reg);
 2580   %}
 2581 
 2582   enc_class riscv_enc_tail_jmp(iRegP jump_target) %{
 2583     Register target_reg = as_Register($jump_target$$reg);
 2584     // exception oop should be in x10
 2585     // ret addr has been popped into ra
 2586     // callee expects it in x13
 2587     __ mv(x13, ra);
 2588     __ jr(target_reg);
 2589   %}
 2590 
 2591   enc_class riscv_enc_rethrow() %{
 2592     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
 2593   %}
 2594 
 2595   enc_class riscv_enc_ret() %{
 2596     __ ret();
 2597   %}
 2598 
 2599 %}
 2600 
 2601 //----------FRAME--------------------------------------------------------------
 2602 // Definition of frame structure and management information.
 2603 //
 2604 //  S T A C K   L A Y O U T    Allocators stack-slot number
 2605 //                             |   (to get allocators register number
 2606 //  G  Owned by    |        |  v    add OptoReg::stack0())
 2607 //  r   CALLER     |        |
 2608 //  o     |        +--------+      pad to even-align allocators stack-slot
 2609 //  w     V        |  pad0  |        numbers; owned by CALLER
 2610 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 2611 //  h     ^        |   in   |  5
 2612 //        |        |  args  |  4   Holes in incoming args owned by SELF
 2613 //  |     |        |        |  3
 2614 //  |     |        +--------+
 2615 //  V     |        | old out|      Empty on Intel, window on Sparc
 2616 //        |    old |preserve|      Must be even aligned.
 2617 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 2618 //        |        |   in   |  3   area for Intel ret address
 2619 //     Owned by    |preserve|      Empty on Sparc.
 2620 //       SELF      +--------+
 2621 //        |        |  pad2  |  2   pad to align old SP
 2622 //        |        +--------+  1
 2623 //        |        | locks  |  0
 2624 //        |        +--------+----> OptoReg::stack0(), even aligned
 2625 //        |        |  pad1  | 11   pad to align new SP
 2626 //        |        +--------+
 2627 //        |        |        | 10
 2628 //        |        | spills |  9   spills
 2629 //        V        |        |  8   (pad0 slot for callee)
 2630 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 2631 //        ^        |  out   |  7
 2632 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 2633 //     Owned by    +--------+
 2634 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 2635 //        |    new |preserve|      Must be even-aligned.
 2636 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 2637 //        |        |        |
 2638 //
 2639 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 2640 //         known from SELF's arguments and the Java calling convention.
 2641 //         Region 6-7 is determined per call site.
 2642 // Note 2: If the calling convention leaves holes in the incoming argument
 2643 //         area, those holes are owned by SELF.  Holes in the outgoing area
 2644 //         are owned by the CALLEE.  Holes should not be necessary in the
 2645 //         incoming area, as the Java calling convention is completely under
 2646 //         the control of the AD file.  Doubles can be sorted and packed to
 2647 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 2648 //         varargs C calling conventions.
 2649 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 2650 //         even aligned with pad0 as needed.
 2651 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 2652 //           (the latter is true on Intel but is it false on RISCV?)
 2653 //         region 6-11 is even aligned; it may be padded out more so that
 2654 //         the region from SP to FP meets the minimum stack alignment.
 2655 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 2656 //         alignment.  Region 11, pad1, may be dynamically extended so that
 2657 //         SP meets the minimum alignment.
 2658 
 2659 frame %{
 2660   // These three registers define part of the calling convention
 2661   // between compiled code and the interpreter.
 2662 
 2663   // Inline Cache Register or methodOop for I2C.
 2664   inline_cache_reg(R31);
 2665 
 2666   // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
 2667   cisc_spilling_operand_name(indOffset);
 2668 
 2669   // Number of stack slots consumed by locking an object
 2670   // generate Compile::sync_stack_slots
 2671   // VMRegImpl::slots_per_word = wordSize / stack_slot_size = 8 / 4 = 2
 2672   sync_stack_slots(1 * VMRegImpl::slots_per_word);
 2673 
 2674   // Compiled code's Frame Pointer
 2675   frame_pointer(R2);
 2676 
 2677   // Interpreter stores its frame pointer in a register which is
 2678   // stored to the stack by I2CAdaptors.
 2679   // I2CAdaptors convert from interpreted java to compiled java.
 2680   interpreter_frame_pointer(R8);
 2681 
 2682   // Stack alignment requirement
 2683   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
 2684 
 2685   // Number of outgoing stack slots killed above the out_preserve_stack_slots
 2686   // for calls to C.  Supports the var-args backing area for register parms.
 2687   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes / BytesPerInt);
 2688 
 2689   // The after-PROLOG location of the return address.  Location of
 2690   // return address specifies a type (REG or STACK) and a number
 2691   // representing the register number (i.e. - use a register name) or
 2692   // stack slot.
 2693   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
 2694   // Otherwise, it is above the locks and verification slot and alignment word
 2695   // TODO this may well be correct but need to check why that - 2 is there
 2696   // ppc port uses 0 but we definitely need to allow for fixed_slots
 2697   // which folds in the space used for monitors
 2698   return_addr(STACK - 2 +
 2699               align_up((Compile::current()->in_preserve_stack_slots() +
 2700                         Compile::current()->fixed_slots()),
 2701                        stack_alignment_in_slots()));
 2702 
 2703   // Location of compiled Java return values.  Same as C for now.
 2704   return_value
 2705   %{
 2706     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
 2707            "only return normal values");
 2708 
 2709     static const int lo[Op_RegL + 1] = { // enum name
 2710       0,                                 // Op_Node
 2711       0,                                 // Op_Set
 2712       R10_num,                           // Op_RegN
 2713       R10_num,                           // Op_RegI
 2714       R10_num,                           // Op_RegP
 2715       F10_num,                           // Op_RegF
 2716       F10_num,                           // Op_RegD
 2717       R10_num                            // Op_RegL
 2718     };
 2719 
 2720     static const int hi[Op_RegL + 1] = { // enum name
 2721       0,                                 // Op_Node
 2722       0,                                 // Op_Set
 2723       OptoReg::Bad,                      // Op_RegN
 2724       OptoReg::Bad,                      // Op_RegI
 2725       R10_H_num,                         // Op_RegP
 2726       OptoReg::Bad,                      // Op_RegF
 2727       F10_H_num,                         // Op_RegD
 2728       R10_H_num                          // Op_RegL
 2729     };
 2730 
 2731     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
 2732   %}
 2733 %}
 2734 
 2735 //----------ATTRIBUTES---------------------------------------------------------
 2736 //----------Operand Attributes-------------------------------------------------
 2737 op_attrib op_cost(1);        // Required cost attribute
 2738 
 2739 //----------Instruction Attributes---------------------------------------------
 2740 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
 2741 ins_attrib ins_size(32);        // Required size attribute (in bits)
 2742 ins_attrib ins_short_branch(0); // Required flag: is this instruction
 2743                                 // a non-matching short branch variant
 2744                                 // of some long branch?
 2745 ins_attrib ins_alignment(4);    // Required alignment attribute (must
 2746                                 // be a power of 2) specifies the
 2747                                 // alignment that some part of the
 2748                                 // instruction (not necessarily the
 2749                                 // start) requires.  If > 1, a
 2750                                 // compute_padding() function must be
 2751                                 // provided for the instruction
 2752 
 2753 //----------OPERANDS-----------------------------------------------------------
 2754 // Operand definitions must precede instruction definitions for correct parsing
 2755 // in the ADLC because operands constitute user defined types which are used in
 2756 // instruction definitions.
 2757 
 2758 //----------Simple Operands----------------------------------------------------
 2759 
 2760 // Integer operands 32 bit
 2761 // 32 bit immediate
 2762 operand immI()
 2763 %{
 2764   match(ConI);
 2765 
 2766   op_cost(0);
 2767   format %{ %}
 2768   interface(CONST_INTER);
 2769 %}
 2770 
 2771 // 32 bit zero
 2772 operand immI0()
 2773 %{
 2774   predicate(n->get_int() == 0);
 2775   match(ConI);
 2776 
 2777   op_cost(0);
 2778   format %{ %}
 2779   interface(CONST_INTER);
 2780 %}
 2781 
 2782 // 32 bit unit increment
 2783 operand immI_1()
 2784 %{
 2785   predicate(n->get_int() == 1);
 2786   match(ConI);
 2787 
 2788   op_cost(0);
 2789   format %{ %}
 2790   interface(CONST_INTER);
 2791 %}
 2792 
 2793 // 32 bit unit decrement
 2794 operand immI_M1()
 2795 %{
 2796   predicate(n->get_int() == -1);
 2797   match(ConI);
 2798 
 2799   op_cost(0);
 2800   format %{ %}
 2801   interface(CONST_INTER);
 2802 %}
 2803 
// Unsigned Integer Immediate:  6-bit int, greater than or equal to 32
 2805 operand uimmI6_ge32() %{
 2806   predicate(((unsigned int)(n->get_int()) < 64) && (n->get_int() >= 32));
 2807   match(ConI);
 2808   op_cost(0);
 2809   format %{ %}
 2810   interface(CONST_INTER);
 2811 %}
 2812 
 2813 operand immI_le_4()
 2814 %{
 2815   predicate(n->get_int() <= 4);
 2816   match(ConI);
 2817 
 2818   op_cost(0);
 2819   format %{ %}
 2820   interface(CONST_INTER);
 2821 %}
 2822 
 2823 operand immI_16()
 2824 %{
 2825   predicate(n->get_int() == 16);
 2826   match(ConI);
 2827   op_cost(0);
 2828   format %{ %}
 2829   interface(CONST_INTER);
 2830 %}
 2831 
 2832 operand immI_24()
 2833 %{
 2834   predicate(n->get_int() == 24);
 2835   match(ConI);
 2836   op_cost(0);
 2837   format %{ %}
 2838   interface(CONST_INTER);
 2839 %}
 2840 
 2841 operand immI_31()
 2842 %{
 2843   predicate(n->get_int() == 31);
 2844   match(ConI);
 2845 
 2846   op_cost(0);
 2847   format %{ %}
 2848   interface(CONST_INTER);
 2849 %}
 2850 
 2851 operand immI_63()
 2852 %{
 2853   predicate(n->get_int() == 63);
 2854   match(ConI);
 2855 
 2856   op_cost(0);
 2857   format %{ %}
 2858   interface(CONST_INTER);
 2859 %}
 2860 
 2861 // 32 bit integer valid for add immediate
 2862 operand immIAdd()
 2863 %{
 2864   predicate(Assembler::is_simm12((int64_t)n->get_int()));
 2865   match(ConI);
 2866   op_cost(0);
 2867   format %{ %}
 2868   interface(CONST_INTER);
 2869 %}
 2870 
 2871 // 32 bit integer valid for sub immediate
 2872 operand immISub()
 2873 %{
 2874   predicate(Assembler::is_simm12(-(int64_t)n->get_int()));
 2875   match(ConI);
 2876   op_cost(0);
 2877   format %{ %}
 2878   interface(CONST_INTER);
 2879 %}
 2880 
 2881 // 5 bit signed value.
 2882 operand immI5()
 2883 %{
 2884   predicate(n->get_int() <= 15 && n->get_int() >= -16);
 2885   match(ConI);
 2886 
 2887   op_cost(0);
 2888   format %{ %}
 2889   interface(CONST_INTER);
 2890 %}
 2891 
// 5 bit signed long value (simm5)
 2893 operand immL5()
 2894 %{
 2895   predicate(n->get_long() <= 15 && n->get_long() >= -16);
 2896   match(ConL);
 2897 
 2898   op_cost(0);
 2899   format %{ %}
 2900   interface(CONST_INTER);
 2901 %}
 2902 
 2903 // Integer operands 64 bit
 2904 // 64 bit immediate
 2905 operand immL()
 2906 %{
 2907   match(ConL);
 2908 
 2909   op_cost(0);
 2910   format %{ %}
 2911   interface(CONST_INTER);
 2912 %}
 2913 
 2914 // 64 bit zero
 2915 operand immL0()
 2916 %{
 2917   predicate(n->get_long() == 0);
 2918   match(ConL);
 2919 
 2920   op_cost(0);
 2921   format %{ %}
 2922   interface(CONST_INTER);
 2923 %}
 2924 
 2925 // Pointer operands
 2926 // Pointer Immediate
 2927 operand immP()
 2928 %{
 2929   match(ConP);
 2930 
 2931   op_cost(0);
 2932   format %{ %}
 2933   interface(CONST_INTER);
 2934 %}
 2935 
 2936 // Null Pointer Immediate
 2937 operand immP0()
 2938 %{
 2939   predicate(n->get_ptr() == 0);
 2940   match(ConP);
 2941 
 2942   op_cost(0);
 2943   format %{ %}
 2944   interface(CONST_INTER);
 2945 %}
 2946 
 2947 // Pointer Immediate One
 2948 // this is used in object initialization (initial object header)
 2949 operand immP_1()
 2950 %{
 2951   predicate(n->get_ptr() == 1);
 2952   match(ConP);
 2953 
 2954   op_cost(0);
 2955   format %{ %}
 2956   interface(CONST_INTER);
 2957 %}
 2958 
 2959 // Card Table Byte Map Base
 2960 operand immByteMapBase()
 2961 %{
 2962   // Get base of card map
 2963   predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
 2964             (CardTable::CardValue*)n->get_ptr() ==
 2965              ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
 2966   match(ConP);
 2967 
 2968   op_cost(0);
 2969   format %{ %}
 2970   interface(CONST_INTER);
 2971 %}
 2972 
 2973 // Int Immediate: low 16-bit mask
 2974 operand immI_16bits()
 2975 %{
 2976   predicate(n->get_int() == 0xFFFF);
 2977   match(ConI);
 2978   op_cost(0);
 2979   format %{ %}
 2980   interface(CONST_INTER);
 2981 %}
 2982 
 2983 operand immIpowerOf2() %{
 2984   predicate(is_power_of_2((juint)(n->get_int())));
 2985   match(ConI);
 2986   op_cost(0);
 2987   format %{ %}
 2988   interface(CONST_INTER);
 2989 %}
 2990 
 2991 // Long Immediate: low 32-bit mask
 2992 operand immL_32bits()
 2993 %{
 2994   predicate(n->get_long() == 0xFFFFFFFFL);
 2995   match(ConL);
 2996   op_cost(0);
 2997   format %{ %}
 2998   interface(CONST_INTER);
 2999 %}
 3000 
 3001 // 64 bit unit decrement
 3002 operand immL_M1()
 3003 %{
 3004   predicate(n->get_long() == -1);
 3005   match(ConL);
 3006 
 3007   op_cost(0);
 3008   format %{ %}
 3009   interface(CONST_INTER);
 3010 %}
 3011 
 3012 
 3013 // 64 bit integer valid for add immediate
 3014 operand immLAdd()
 3015 %{
 3016   predicate(Assembler::is_simm12(n->get_long()));
 3017   match(ConL);
 3018   op_cost(0);
 3019   format %{ %}
 3020   interface(CONST_INTER);
 3021 %}
 3022 
 3023 // 64 bit integer valid for sub immediate
 3024 operand immLSub()
 3025 %{
 3026   predicate(Assembler::is_simm12(-(n->get_long())));
 3027   match(ConL);
 3028   op_cost(0);
 3029   format %{ %}
 3030   interface(CONST_INTER);
 3031 %}
 3032 
 3033 // Narrow pointer operands
 3034 // Narrow Pointer Immediate
 3035 operand immN()
 3036 %{
 3037   match(ConN);
 3038 
 3039   op_cost(0);
 3040   format %{ %}
 3041   interface(CONST_INTER);
 3042 %}
 3043 
 3044 // Narrow Null Pointer Immediate
 3045 operand immN0()
 3046 %{
 3047   predicate(n->get_narrowcon() == 0);
 3048   match(ConN);
 3049 
 3050   op_cost(0);
 3051   format %{ %}
 3052   interface(CONST_INTER);
 3053 %}
 3054 
 3055 operand immNKlass()
 3056 %{
 3057   match(ConNKlass);
 3058 
 3059   op_cost(0);
 3060   format %{ %}
 3061   interface(CONST_INTER);
 3062 %}
 3063 
 3064 // Float and Double operands
 3065 // Double Immediate
 3066 operand immD()
 3067 %{
 3068   match(ConD);
 3069   op_cost(0);
 3070   format %{ %}
 3071   interface(CONST_INTER);
 3072 %}
 3073 
 3074 // Double Immediate: +0.0d
 3075 operand immD0()
 3076 %{
 3077   predicate(jlong_cast(n->getd()) == 0);
 3078   match(ConD);
 3079 
 3080   op_cost(0);
 3081   format %{ %}
 3082   interface(CONST_INTER);
 3083 %}
 3084 
 3085 // Float Immediate
 3086 operand immF()
 3087 %{
 3088   match(ConF);
 3089   op_cost(0);
 3090   format %{ %}
 3091   interface(CONST_INTER);
 3092 %}
 3093 
 3094 // Float Immediate: +0.0f.
 3095 operand immF0()
 3096 %{
 3097   predicate(jint_cast(n->getf()) == 0);
 3098   match(ConF);
 3099 
 3100   op_cost(0);
 3101   format %{ %}
 3102   interface(CONST_INTER);
 3103 %}
 3104 
 3105 operand immIOffset()
 3106 %{
 3107   predicate(Assembler::is_simm12(n->get_int()));
 3108   match(ConI);
 3109   op_cost(0);
 3110   format %{ %}
 3111   interface(CONST_INTER);
 3112 %}
 3113 
 3114 operand immLOffset()
 3115 %{
 3116   predicate(Assembler::is_simm12(n->get_long()));
 3117   match(ConL);
 3118   op_cost(0);
 3119   format %{ %}
 3120   interface(CONST_INTER);
 3121 %}
 3122 
 3123 // Scale values
 3124 operand immIScale()
 3125 %{
 3126   predicate(1 <= n->get_int() && (n->get_int() <= 3));
 3127   match(ConI);
 3128 
 3129   op_cost(0);
 3130   format %{ %}
 3131   interface(CONST_INTER);
 3132 %}
 3133 
 3134 // Integer 32 bit Register Operands
 3135 operand iRegI()
 3136 %{
 3137   constraint(ALLOC_IN_RC(any_reg32));
 3138   match(RegI);
 3139   match(iRegINoSp);
 3140   op_cost(0);
 3141   format %{ %}
 3142   interface(REG_INTER);
 3143 %}
 3144 
 3145 // Integer 32 bit Register not Special
 3146 operand iRegINoSp()
 3147 %{
 3148   constraint(ALLOC_IN_RC(no_special_reg32));
 3149   match(RegI);
 3150   op_cost(0);
 3151   format %{ %}
 3152   interface(REG_INTER);
 3153 %}
 3154 
 3155 // Register R10 only
 3156 operand iRegI_R10()
 3157 %{
 3158   constraint(ALLOC_IN_RC(int_r10_reg));
 3159   match(RegI);
 3160   match(iRegINoSp);
 3161   op_cost(0);
 3162   format %{ %}
 3163   interface(REG_INTER);
 3164 %}
 3165 
 3166 // Register R12 only
 3167 operand iRegI_R12()
 3168 %{
 3169   constraint(ALLOC_IN_RC(int_r12_reg));
 3170   match(RegI);
 3171   match(iRegINoSp);
 3172   op_cost(0);
 3173   format %{ %}
 3174   interface(REG_INTER);
 3175 %}
 3176 
 3177 // Register R13 only
 3178 operand iRegI_R13()
 3179 %{
 3180   constraint(ALLOC_IN_RC(int_r13_reg));
 3181   match(RegI);
 3182   match(iRegINoSp);
 3183   op_cost(0);
 3184   format %{ %}
 3185   interface(REG_INTER);
 3186 %}
 3187 
 3188 // Register R14 only
 3189 operand iRegI_R14()
 3190 %{
 3191   constraint(ALLOC_IN_RC(int_r14_reg));
 3192   match(RegI);
 3193   match(iRegINoSp);
 3194   op_cost(0);
 3195   format %{ %}
 3196   interface(REG_INTER);
 3197 %}
 3198 
 3199 // Integer 64 bit Register Operands
 3200 operand iRegL()
 3201 %{
 3202   constraint(ALLOC_IN_RC(any_reg));
 3203   match(RegL);
 3204   match(iRegLNoSp);
 3205   op_cost(0);
 3206   format %{ %}
 3207   interface(REG_INTER);
 3208 %}
 3209 
 3210 // Integer 64 bit Register not Special
 3211 operand iRegLNoSp()
 3212 %{
 3213   constraint(ALLOC_IN_RC(no_special_reg));
 3214   match(RegL);
 3215   match(iRegL_R10);
 3216   format %{ %}
 3217   interface(REG_INTER);
 3218 %}
 3219 
 3220 // Long 64 bit Register R29 only
 3221 operand iRegL_R29()
 3222 %{
 3223   constraint(ALLOC_IN_RC(r29_reg));
 3224   match(RegL);
 3225   match(iRegLNoSp);
 3226   op_cost(0);
 3227   format %{ %}
 3228   interface(REG_INTER);
 3229 %}
 3230 
 3231 // Long 64 bit Register R30 only
 3232 operand iRegL_R30()
 3233 %{
 3234   constraint(ALLOC_IN_RC(r30_reg));
 3235   match(RegL);
 3236   match(iRegLNoSp);
 3237   op_cost(0);
 3238   format %{ %}
 3239   interface(REG_INTER);
 3240 %}
 3241 
 3242 // Pointer Register Operands
 3243 // Pointer Register
 3244 operand iRegP()
 3245 %{
 3246   constraint(ALLOC_IN_RC(ptr_reg));
 3247   match(RegP);
 3248   match(iRegPNoSp);
 3249   match(iRegP_R10);
 3250   match(iRegP_R15);
 3251   match(javaThread_RegP);
 3252   op_cost(0);
 3253   format %{ %}
 3254   interface(REG_INTER);
 3255 %}
 3256 
 3257 // Pointer 64 bit Register not Special
 3258 operand iRegPNoSp()
 3259 %{
 3260   constraint(ALLOC_IN_RC(no_special_ptr_reg));
 3261   match(RegP);
 3262   op_cost(0);
 3263   format %{ %}
 3264   interface(REG_INTER);
 3265 %}
 3266 
 3267 // This operand is not allowed to use fp even if
 3268 // fp is not used to hold the frame pointer.
 3269 operand iRegPNoSpNoFp()
 3270 %{
 3271   constraint(ALLOC_IN_RC(no_special_no_fp_ptr_reg));
 3272   match(RegP);
 3273   match(iRegPNoSp);
 3274   op_cost(0);
 3275   format %{ %}
 3276   interface(REG_INTER);
 3277 %}
 3278 
 3279 operand iRegP_R10()
 3280 %{
 3281   constraint(ALLOC_IN_RC(r10_reg));
 3282   match(RegP);
 3283   // match(iRegP);
 3284   match(iRegPNoSp);
 3285   op_cost(0);
 3286   format %{ %}
 3287   interface(REG_INTER);
 3288 %}
 3289 
 3290 // Pointer 64 bit Register R11 only
 3291 operand iRegP_R11()
 3292 %{
 3293   constraint(ALLOC_IN_RC(r11_reg));
 3294   match(RegP);
 3295   match(iRegPNoSp);
 3296   op_cost(0);
 3297   format %{ %}
 3298   interface(REG_INTER);
 3299 %}
 3300 
 3301 operand iRegP_R12()
 3302 %{
 3303   constraint(ALLOC_IN_RC(r12_reg));
 3304   match(RegP);
 3305   // match(iRegP);
 3306   match(iRegPNoSp);
 3307   op_cost(0);
 3308   format %{ %}
 3309   interface(REG_INTER);
 3310 %}
 3311 
 3312 // Pointer 64 bit Register R13 only
 3313 operand iRegP_R13()
 3314 %{
 3315   constraint(ALLOC_IN_RC(r13_reg));
 3316   match(RegP);
 3317   match(iRegPNoSp);
 3318   op_cost(0);
 3319   format %{ %}
 3320   interface(REG_INTER);
 3321 %}
 3322 
 3323 operand iRegP_R14()
 3324 %{
 3325   constraint(ALLOC_IN_RC(r14_reg));
 3326   match(RegP);
 3327   // match(iRegP);
 3328   match(iRegPNoSp);
 3329   op_cost(0);
 3330   format %{ %}
 3331   interface(REG_INTER);
 3332 %}
 3333 
 3334 operand iRegP_R15()
 3335 %{
 3336   constraint(ALLOC_IN_RC(r15_reg));
 3337   match(RegP);
 3338   // match(iRegP);
 3339   match(iRegPNoSp);
 3340   op_cost(0);
 3341   format %{ %}
 3342   interface(REG_INTER);
 3343 %}
 3344 
 3345 operand iRegP_R16()
 3346 %{
 3347   constraint(ALLOC_IN_RC(r16_reg));
 3348   match(RegP);
 3349   match(iRegPNoSp);
 3350   op_cost(0);
 3351   format %{ %}
 3352   interface(REG_INTER);
 3353 %}
 3354 
 3355 // Pointer 64 bit Register R28 only
 3356 operand iRegP_R28()
 3357 %{
 3358   constraint(ALLOC_IN_RC(r28_reg));
 3359   match(RegP);
 3360   match(iRegPNoSp);
 3361   op_cost(0);
 3362   format %{ %}
 3363   interface(REG_INTER);
 3364 %}
 3365 
 3366 // Pointer 64 bit Register R30 only
 3367 operand iRegP_R30()
 3368 %{
 3369   constraint(ALLOC_IN_RC(r30_reg));
 3370   match(RegP);
 3371   match(iRegPNoSp);
 3372   op_cost(0);
 3373   format %{ %}
 3374   interface(REG_INTER);
 3375 %}
 3376 
 3377 // Pointer 64 bit Register R31 only
 3378 operand iRegP_R31()
 3379 %{
 3380   constraint(ALLOC_IN_RC(r31_reg));
 3381   match(RegP);
 3382   match(iRegPNoSp);
 3383   op_cost(0);
 3384   format %{ %}
 3385   interface(REG_INTER);
 3386 %}
 3387 
 3388 // Pointer Register Operands
 3389 // Narrow Pointer Register
 3390 operand iRegN()
 3391 %{
 3392   constraint(ALLOC_IN_RC(any_reg32));
 3393   match(RegN);
 3394   match(iRegNNoSp);
 3395   op_cost(0);
 3396   format %{ %}
 3397   interface(REG_INTER);
 3398 %}
 3399 
// Narrow Pointer Register not Special
 3401 operand iRegNNoSp()
 3402 %{
 3403   constraint(ALLOC_IN_RC(no_special_reg32));
 3404   match(RegN);
 3405   op_cost(0);
 3406   format %{ %}
 3407   interface(REG_INTER);
 3408 %}
 3409 
 3410 // Long 64 bit Register R10 only
 3411 operand iRegL_R10()
 3412 %{
 3413   constraint(ALLOC_IN_RC(r10_reg));
 3414   match(RegL);
 3415   match(iRegLNoSp);
 3416   op_cost(0);
 3417   format %{ %}
 3418   interface(REG_INTER);
 3419 %}
 3420 
 3421 // Float Register
 3422 // Float register operands
 3423 operand fRegF()
 3424 %{
 3425   constraint(ALLOC_IN_RC(float_reg));
 3426   match(RegF);
 3427 
 3428   op_cost(0);
 3429   format %{ %}
 3430   interface(REG_INTER);
 3431 %}
 3432 
 3433 // Double Register
 3434 // Double register operands
 3435 operand fRegD()
 3436 %{
 3437   constraint(ALLOC_IN_RC(double_reg));
 3438   match(RegD);
 3439 
 3440   op_cost(0);
 3441   format %{ %}
 3442   interface(REG_INTER);
 3443 %}
 3444 
 3445 // Generic vector class. This will be used for
 3446 // all vector operands.
 3447 operand vReg()
 3448 %{
 3449   constraint(ALLOC_IN_RC(vectora_reg));
 3450   match(VecA);
 3451   op_cost(0);
 3452   format %{ %}
 3453   interface(REG_INTER);
 3454 %}
 3455 
 3456 operand vReg_V1()
 3457 %{
 3458   constraint(ALLOC_IN_RC(v1_reg));
 3459   match(VecA);
 3460   match(vReg);
 3461   op_cost(0);
 3462   format %{ %}
 3463   interface(REG_INTER);
 3464 %}
 3465 
 3466 operand vReg_V2()
 3467 %{
 3468   constraint(ALLOC_IN_RC(v2_reg));
 3469   match(VecA);
 3470   match(vReg);
 3471   op_cost(0);
 3472   format %{ %}
 3473   interface(REG_INTER);
 3474 %}
 3475 
 3476 operand vReg_V3()
 3477 %{
 3478   constraint(ALLOC_IN_RC(v3_reg));
 3479   match(VecA);
 3480   match(vReg);
 3481   op_cost(0);
 3482   format %{ %}
 3483   interface(REG_INTER);
 3484 %}
 3485 
 3486 operand vReg_V4()
 3487 %{
 3488   constraint(ALLOC_IN_RC(v4_reg));
 3489   match(VecA);
 3490   match(vReg);
 3491   op_cost(0);
 3492   format %{ %}
 3493   interface(REG_INTER);
 3494 %}
 3495 
 3496 operand vReg_V5()
 3497 %{
 3498   constraint(ALLOC_IN_RC(v5_reg));
 3499   match(VecA);
 3500   match(vReg);
 3501   op_cost(0);
 3502   format %{ %}
 3503   interface(REG_INTER);
 3504 %}
 3505 
 3506 operand vReg_V6()
 3507 %{
 3508   constraint(ALLOC_IN_RC(v6_reg));
 3509   match(VecA);
 3510   match(vReg);
 3511   op_cost(0);
 3512   format %{ %}
 3513   interface(REG_INTER);
 3514 %}
 3515 
 3516 operand vReg_V7()
 3517 %{
 3518   constraint(ALLOC_IN_RC(v7_reg));
 3519   match(VecA);
 3520   match(vReg);
 3521   op_cost(0);
 3522   format %{ %}
 3523   interface(REG_INTER);
 3524 %}
 3525 
 3526 operand vReg_V8()
 3527 %{
 3528   constraint(ALLOC_IN_RC(v8_reg));
 3529   match(VecA);
 3530   match(vReg);
 3531   op_cost(0);
 3532   format %{ %}
 3533   interface(REG_INTER);
 3534 %}
 3535 
 3536 operand vReg_V9()
 3537 %{
 3538   constraint(ALLOC_IN_RC(v9_reg));
 3539   match(VecA);
 3540   match(vReg);
 3541   op_cost(0);
 3542   format %{ %}
 3543   interface(REG_INTER);
 3544 %}
 3545 
 3546 operand vReg_V10()
 3547 %{
 3548   constraint(ALLOC_IN_RC(v10_reg));
 3549   match(VecA);
 3550   match(vReg);
 3551   op_cost(0);
 3552   format %{ %}
 3553   interface(REG_INTER);
 3554 %}
 3555 
 3556 operand vReg_V11()
 3557 %{
 3558   constraint(ALLOC_IN_RC(v11_reg));
 3559   match(VecA);
 3560   match(vReg);
 3561   op_cost(0);
 3562   format %{ %}
 3563   interface(REG_INTER);
 3564 %}
 3565 
 3566 operand vReg_V12()
 3567 %{
 3568   constraint(ALLOC_IN_RC(v12_reg));
 3569   match(VecA);
 3570   match(vReg);
 3571   op_cost(0);
 3572   format %{ %}
 3573   interface(REG_INTER);
 3574 %}
 3575 
 3576 operand vReg_V13()
 3577 %{
 3578   constraint(ALLOC_IN_RC(v13_reg));
 3579   match(VecA);
 3580   match(vReg);
 3581   op_cost(0);
 3582   format %{ %}
 3583   interface(REG_INTER);
 3584 %}
 3585 
 3586 operand vReg_V14()
 3587 %{
 3588   constraint(ALLOC_IN_RC(v14_reg));
 3589   match(VecA);
 3590   match(vReg);
 3591   op_cost(0);
 3592   format %{ %}
 3593   interface(REG_INTER);
 3594 %}
 3595 
 3596 operand vReg_V15()
 3597 %{
 3598   constraint(ALLOC_IN_RC(v15_reg));
 3599   match(VecA);
 3600   match(vReg);
 3601   op_cost(0);
 3602   format %{ %}
 3603   interface(REG_INTER);
 3604 %}
 3605 
 3606 operand vRegMask()
 3607 %{
 3608   constraint(ALLOC_IN_RC(vmask_reg));
 3609   match(RegVectMask);
 3610   match(vRegMask_V0);
 3611   op_cost(0);
 3612   format %{ %}
 3613   interface(REG_INTER);
 3614 %}
 3615 
 3616 // The mask value used to control execution of a masked
 3617 // vector instruction is always supplied by vector register v0.
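// (For illustration: RVV masked instructions name the mask implicitly, e.g.
// "vadd.vv vd, vs2, vs1, v0.t" -- only v0 can supply the mask, hence the
// dedicated register class below.)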
 3618 operand vRegMask_V0()
 3619 %{
 3620   constraint(ALLOC_IN_RC(vmask_reg_v0));
 3621   match(RegVectMask);
 3622   match(vRegMask);
 3623   op_cost(0);
 3624   format %{ %}
 3625   interface(REG_INTER);
 3626 %}
 3627 
 3628 // Java Thread Register
 3629 operand javaThread_RegP(iRegP reg)
 3630 %{
 3631   constraint(ALLOC_IN_RC(java_thread_reg)); // java_thread_reg
 3632   match(reg);
 3633   op_cost(0);
 3634   format %{ %}
 3635   interface(REG_INTER);
 3636 %}
 3637 
 3638 //----------Memory Operands----------------------------------------------------
// RISC-V has only base-plus-offset and literal addressing modes, so there is no
// need for index and scale. Here index is set to 0xffffffff and scale to 0x0.
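// Illustrative example (not additional code): for a field access at offset 16
// from a base pointer, the address is the ideal subtree (AddP base 16). The
// indOffI/indOffL operands below match exactly that shape, reporting
// base = $reg, disp = 16 and leaving index/scale unused, so the generated code
// is a plain base+offset access such as "ld t0, 16(t1)" (the registers here
// are made up for the example).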
 3641 operand indirect(iRegP reg)
 3642 %{
 3643   constraint(ALLOC_IN_RC(ptr_reg));
 3644   match(reg);
 3645   op_cost(0);
 3646   format %{ "[$reg]" %}
 3647   interface(MEMORY_INTER) %{
 3648     base($reg);
 3649     index(0xffffffff);
 3650     scale(0x0);
 3651     disp(0x0);
 3652   %}
 3653 %}
 3654 
 3655 operand indOffI(iRegP reg, immIOffset off)
 3656 %{
 3657   constraint(ALLOC_IN_RC(ptr_reg));
 3658   match(AddP reg off);
 3659   op_cost(0);
 3660   format %{ "[$reg, $off]" %}
 3661   interface(MEMORY_INTER) %{
 3662     base($reg);
 3663     index(0xffffffff);
 3664     scale(0x0);
 3665     disp($off);
 3666   %}
 3667 %}
 3668 
 3669 operand indOffL(iRegP reg, immLOffset off)
 3670 %{
 3671   constraint(ALLOC_IN_RC(ptr_reg));
 3672   match(AddP reg off);
 3673   op_cost(0);
 3674   format %{ "[$reg, $off]" %}
 3675   interface(MEMORY_INTER) %{
 3676     base($reg);
 3677     index(0xffffffff);
 3678     scale(0x0);
 3679     disp($off);
 3680   %}
 3681 %}
 3682 
 3683 operand indirectN(iRegN reg)
 3684 %{
 3685   predicate(CompressedOops::shift() == 0);
 3686   constraint(ALLOC_IN_RC(ptr_reg));
 3687   match(DecodeN reg);
 3688   op_cost(0);
 3689   format %{ "[$reg]\t# narrow" %}
 3690   interface(MEMORY_INTER) %{
 3691     base($reg);
 3692     index(0xffffffff);
 3693     scale(0x0);
 3694     disp(0x0);
 3695   %}
 3696 %}
 3697 
 3698 operand indOffIN(iRegN reg, immIOffset off)
 3699 %{
 3700   predicate(CompressedOops::shift() == 0);
 3701   constraint(ALLOC_IN_RC(ptr_reg));
 3702   match(AddP (DecodeN reg) off);
 3703   op_cost(0);
 3704   format %{ "[$reg, $off]\t# narrow" %}
 3705   interface(MEMORY_INTER) %{
 3706     base($reg);
 3707     index(0xffffffff);
 3708     scale(0x0);
 3709     disp($off);
 3710   %}
 3711 %}
 3712 
 3713 operand indOffLN(iRegN reg, immLOffset off)
 3714 %{
 3715   predicate(CompressedOops::shift() == 0);
 3716   constraint(ALLOC_IN_RC(ptr_reg));
 3717   match(AddP (DecodeN reg) off);
 3718   op_cost(0);
 3719   format %{ "[$reg, $off]\t# narrow" %}
 3720   interface(MEMORY_INTER) %{
 3721     base($reg);
 3722     index(0xffffffff);
 3723     scale(0x0);
 3724     disp($off);
 3725   %}
 3726 %}
 3727 
 3728 //----------Special Memory Operands--------------------------------------------
 3729 // Stack Slot Operand - This operand is used for loading and storing temporary
 3730 //                      values on the stack where a match requires a value to
 3731 //                      flow through memory.
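//                      For example, when the register allocator spills a value,
//                      the stackSlot* operands below address it relative to the
//                      stack pointer, with disp($reg) supplying the slot's
//                      SP-relative offset.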
 3732 operand stackSlotI(sRegI reg)
 3733 %{
 3734   constraint(ALLOC_IN_RC(stack_slots));
 3735   // No match rule because this operand is only generated in matching
 3736   // match(RegI);
 3737   format %{ "[$reg]" %}
 3738   interface(MEMORY_INTER) %{
    base(0x02);  // SP (x2)
 3740     index(0xffffffff);  // No Index
 3741     scale(0x0);  // No Scale
 3742     disp($reg);  // Stack Offset
 3743   %}
 3744 %}
 3745 
 3746 operand stackSlotF(sRegF reg)
 3747 %{
 3748   constraint(ALLOC_IN_RC(stack_slots));
 3749   // No match rule because this operand is only generated in matching
 3750   // match(RegF);
 3751   format %{ "[$reg]" %}
 3752   interface(MEMORY_INTER) %{
    base(0x02);  // SP (x2)
 3754     index(0xffffffff);  // No Index
 3755     scale(0x0);  // No Scale
 3756     disp($reg);  // Stack Offset
 3757   %}
 3758 %}
 3759 
 3760 operand stackSlotD(sRegD reg)
 3761 %{
 3762   constraint(ALLOC_IN_RC(stack_slots));
 3763   // No match rule because this operand is only generated in matching
 3764   // match(RegD);
 3765   format %{ "[$reg]" %}
 3766   interface(MEMORY_INTER) %{
    base(0x02);  // SP (x2)
 3768     index(0xffffffff);  // No Index
 3769     scale(0x0);  // No Scale
 3770     disp($reg);  // Stack Offset
 3771   %}
 3772 %}
 3773 
 3774 operand stackSlotL(sRegL reg)
 3775 %{
 3776   constraint(ALLOC_IN_RC(stack_slots));
 3777   // No match rule because this operand is only generated in matching
 3778   // match(RegL);
 3779   format %{ "[$reg]" %}
 3780   interface(MEMORY_INTER) %{
    base(0x02);  // SP (x2)
 3782     index(0xffffffff);  // No Index
 3783     scale(0x0);  // No Scale
 3784     disp($reg);  // Stack Offset
 3785   %}
 3786 %}
 3787 
 3788 // Special operand allowing long args to int ops to be truncated for free
 3789 
 3790 operand iRegL2I(iRegL reg) %{
 3791 
 3792   op_cost(0);
 3793 
 3794   match(ConvL2I reg);
 3795 
 3796   format %{ "l2i($reg)" %}
 3797 
 3798   interface(REG_INTER)
 3799 %}
 3800 
 3801 
 3802 // Comparison Operands
 3803 // NOTE: Label is a predefined operand which should not be redefined in
 3804 //       the AD file. It is generically handled within the ADLC.
 3805 
 3806 //----------Conditional Branch Operands----------------------------------------
 3807 // Comparison Op  - This is the operation of the comparison, and is limited to
 3808 //                  the following set of codes:
 3809 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 3810 //
 3811 // Other attributes of the comparison, such as unsignedness, are specified
 3812 // by the comparison instruction that sets a condition code flags register.
 3813 // That result is represented by a flags operand whose subtype is appropriate
 3814 // to the unsignedness (etc.) of the comparison.
 3815 //
 3816 // Later, the instruction which matches both the Comparison Op (a Bool) and
 3817 // the flags (produced by the Cmp) specifies the coding of the comparison op
 3818 // by matching a specific subtype of Bool operand below, such as cmpOpU.
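//
// Illustrative sketch (not additional rules): an ideal subtree such as
//
//   If (Bool (CmpI op1 op2) lt)
//
// is matched by a branch rule that takes a cmpOp together with the two source
// registers, and the encoding uses the operand's condition (here "lt") to pick
// the RISC-V branch, emitting e.g. "blt op1, op2, L". RISC-V has no
// architectural flags register; the rFlagsReg operand defined below mainly
// serves C2's machine-independent model.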
 3819 
 3820 
 3821 // used for signed integral comparisons and fp comparisons
 3822 operand cmpOp()
 3823 %{
 3824   match(Bool);
 3825 
 3826   format %{ "" %}
 3827 
  // the values in the interface derive from struct BoolTest::mask
 3829   interface(COND_INTER) %{
 3830     equal(0x0, "eq");
 3831     greater(0x1, "gt");
 3832     overflow(0x2, "overflow");
 3833     less(0x3, "lt");
 3834     not_equal(0x4, "ne");
 3835     less_equal(0x5, "le");
 3836     no_overflow(0x6, "no_overflow");
 3837     greater_equal(0x7, "ge");
 3838   %}
 3839 %}
 3840 
 3841 // used for unsigned integral comparisons
 3842 operand cmpOpU()
 3843 %{
 3844   match(Bool);
 3845 
 3846   format %{ "" %}
  // the values in the interface derive from struct BoolTest::mask
 3848   interface(COND_INTER) %{
 3849     equal(0x0, "eq");
 3850     greater(0x1, "gtu");
 3851     overflow(0x2, "overflow");
 3852     less(0x3, "ltu");
 3853     not_equal(0x4, "ne");
 3854     less_equal(0x5, "leu");
 3855     no_overflow(0x6, "no_overflow");
 3856     greater_equal(0x7, "geu");
 3857   %}
 3858 %}
 3859 
 3860 // used for certain integral comparisons which can be
 3861 // converted to bxx instructions
 3862 operand cmpOpEqNe()
 3863 %{
 3864   match(Bool);
 3865   op_cost(0);
 3866   predicate(n->as_Bool()->_test._test == BoolTest::ne ||
 3867             n->as_Bool()->_test._test == BoolTest::eq);
 3868 
 3869   format %{ "" %}
 3870   interface(COND_INTER) %{
 3871     equal(0x0, "eq");
 3872     greater(0x1, "gt");
 3873     overflow(0x2, "overflow");
 3874     less(0x3, "lt");
 3875     not_equal(0x4, "ne");
 3876     less_equal(0x5, "le");
 3877     no_overflow(0x6, "no_overflow");
 3878     greater_equal(0x7, "ge");
 3879   %}
 3880 %}
 3881 
 3882 operand cmpOpULtGe()
 3883 %{
 3884   match(Bool);
 3885   op_cost(0);
 3886   predicate(n->as_Bool()->_test._test == BoolTest::lt ||
 3887             n->as_Bool()->_test._test == BoolTest::ge);
 3888 
 3889   format %{ "" %}
 3890   interface(COND_INTER) %{
 3891     equal(0x0, "eq");
 3892     greater(0x1, "gtu");
 3893     overflow(0x2, "overflow");
 3894     less(0x3, "ltu");
 3895     not_equal(0x4, "ne");
 3896     less_equal(0x5, "leu");
 3897     no_overflow(0x6, "no_overflow");
 3898     greater_equal(0x7, "geu");
 3899   %}
 3900 %}
 3901 
 3902 operand cmpOpUEqNeLeGt()
 3903 %{
 3904   match(Bool);
 3905   op_cost(0);
 3906   predicate(n->as_Bool()->_test._test == BoolTest::ne ||
 3907             n->as_Bool()->_test._test == BoolTest::eq ||
 3908             n->as_Bool()->_test._test == BoolTest::le ||
 3909             n->as_Bool()->_test._test == BoolTest::gt);
 3910 
 3911   format %{ "" %}
 3912   interface(COND_INTER) %{
 3913     equal(0x0, "eq");
 3914     greater(0x1, "gtu");
 3915     overflow(0x2, "overflow");
 3916     less(0x3, "ltu");
 3917     not_equal(0x4, "ne");
 3918     less_equal(0x5, "leu");
 3919     no_overflow(0x6, "no_overflow");
 3920     greater_equal(0x7, "geu");
 3921   %}
 3922 %}
 3923 
 3924 
 3925 // Flags register, used as output of compare logic
 3926 operand rFlagsReg()
 3927 %{
 3928   constraint(ALLOC_IN_RC(reg_flags));
 3929   match(RegFlags);
 3930 
 3931   op_cost(0);
 3932   format %{ "RFLAGS" %}
 3933   interface(REG_INTER);
 3934 %}
 3935 
 3936 // Special Registers
 3937 
 3938 // Method Register
 3939 operand inline_cache_RegP(iRegP reg)
 3940 %{
 3941   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
 3942   match(reg);
 3943   match(iRegPNoSp);
 3944   op_cost(0);
 3945   format %{ %}
 3946   interface(REG_INTER);
 3947 %}
 3948 
 3949 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
 3951 // instruction definitions by not requiring the AD writer to specify
 3952 // separate instructions for every form of operand when the
 3953 // instruction accepts multiple operand types with the same basic
 3954 // encoding and format. The classic case of this is memory operands.
 3955 
// memory is used to define the read/write location for load/store
// instruction defs. We can turn a memory op into an Address.
 3958 
 3959 opclass memory(indirect, indOffI, indOffL, indirectN, indOffIN, indOffLN);
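// For example, a single load rule written against "memory mem" (such as loadI
// further down) covers the plain [reg], [reg, imm] and narrow-oop base forms
// above without needing a separate instruct for each addressing shape.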
 3960 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. It allows the src to be either an iRegI or a (ConvL2I
// iRegL). In the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// N.B. this does not elide all L2I conversions. If the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes, so an l2i gets planted
// (actually an addiw $dst, $src, 0) and the downstream instructions
// consume the result of the L2I as an iRegI input. That's a shame since
// the addiw is actually redundant, but it's not too costly. (An
// illustrative example follows the opclass list below.)
 3973 
 3974 opclass iRegIorL2I(iRegI, iRegL2I);
 3975 opclass iRegIorL(iRegI, iRegL);
 3976 opclass iRegNorP(iRegN, iRegP);
 3977 opclass iRegILNP(iRegI, iRegL, iRegN, iRegP);
 3978 opclass iRegILNPNoSp(iRegINoSp, iRegLNoSp, iRegNNoSp, iRegPNoSp);
 3979 opclass immIorL(immI, immL);
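
// Illustrative example (a sketch; the Java snippet is hypothetical): for
//
//   int f(long x, int y) { return (int) x + y; }
//
// the ideal graph contains (AddI (ConvL2I x) y). Because addw only reads the
// low 32 bits of its sources, an AddI rule written against iRegIorL2I can
// match the ConvL2I input directly and emit just "addw dst, x, y"; no separate
// sign-extending "addiw x, x, 0" is needed for this single use.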
 3980 
 3981 //----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.
 3983 
 3984 // For specific pipelines, e.g. generic RISC-V, define the stages of that pipeline
 3985 //pipe_desc(ID, EX, MEM, WR);
 3986 #define ID   S0
 3987 #define EX   S1
 3988 #define MEM  S2
 3989 #define WR   S3
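
// In the pipe classes below, ID, EX, MEM and WR are therefore just aliases for
// stages S0..S3 of the generic 6-stage pipe_desc(S0, S1, S2, S3, S4, S5)
// declared further down.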
 3990 
 3991 // Integer ALU reg operation
 3992 pipeline %{
 3993 
 3994 attributes %{
 3995   // RISC-V instructions are of fixed length
 3996   fixed_size_instructions;           // Fixed size instructions TODO does
 3997   max_instructions_per_bundle = 2;   // Generic RISC-V 1, Sifive Series 7 2
 3998   // RISC-V instructions come in 32-bit word units
 3999   instruction_unit_size = 4;         // An instruction is 4 bytes long
 4000   instruction_fetch_unit_size = 64;  // The processor fetches one line
 4001   instruction_fetch_units = 1;       // of 64 bytes
 4002 
 4003   // List of nop instructions
 4004   nops( MachNop );
 4005 %}
 4006 
// We don't use an actual pipeline model, so we don't care about resources
// or description. We do use pipeline classes to introduce fixed
// latencies.
 4010 
 4011 //----------RESOURCES----------------------------------------------------------
 4012 // Resources are the functional units available to the machine
 4013 
 4014 // Generic RISC-V pipeline
 4015 // 1 decoder
 4016 // 1 instruction decoded per cycle
// 1 load/store op per cycle, 1 branch, 1 FPU
 4018 // 1 mul, 1 div
 4019 
 4020 resources ( DECODE,
 4021             ALU,
 4022             MUL,
 4023             DIV,
 4024             BRANCH,
 4025             LDST,
 4026             FPU);
 4027 
 4028 //----------PIPELINE DESCRIPTION-----------------------------------------------
 4029 // Pipeline Description specifies the stages in the machine's pipeline
 4030 
 4031 // Define the pipeline as a generic 6 stage pipeline
 4032 pipe_desc(S0, S1, S2, S3, S4, S5);
 4033 
 4034 //----------PIPELINE CLASSES---------------------------------------------------
 4035 // Pipeline Classes describe the stages in which input and output are
 4036 // referenced by the hardware pipeline.
 4037 
 4038 pipe_class fp_dop_reg_reg_s(fRegF dst, fRegF src1, fRegF src2)
 4039 %{
 4040   single_instruction;
 4041   src1   : S1(read);
 4042   src2   : S2(read);
 4043   dst    : S5(write);
 4044   DECODE : ID;
 4045   FPU    : S5;
 4046 %}
 4047 
 4048 pipe_class fp_dop_reg_reg_d(fRegD dst, fRegD src1, fRegD src2)
 4049 %{
 4050   src1   : S1(read);
 4051   src2   : S2(read);
 4052   dst    : S5(write);
 4053   DECODE : ID;
 4054   FPU    : S5;
 4055 %}
 4056 
 4057 pipe_class fp_uop_s(fRegF dst, fRegF src)
 4058 %{
 4059   single_instruction;
 4060   src    : S1(read);
 4061   dst    : S5(write);
 4062   DECODE : ID;
 4063   FPU    : S5;
 4064 %}
 4065 
 4066 pipe_class fp_uop_d(fRegD dst, fRegD src)
 4067 %{
 4068   single_instruction;
 4069   src    : S1(read);
 4070   dst    : S5(write);
 4071   DECODE : ID;
 4072   FPU    : S5;
 4073 %}
 4074 
 4075 pipe_class fp_d2f(fRegF dst, fRegD src)
 4076 %{
 4077   single_instruction;
 4078   src    : S1(read);
 4079   dst    : S5(write);
 4080   DECODE : ID;
 4081   FPU    : S5;
 4082 %}
 4083 
 4084 pipe_class fp_f2d(fRegD dst, fRegF src)
 4085 %{
 4086   single_instruction;
 4087   src    : S1(read);
 4088   dst    : S5(write);
 4089   DECODE : ID;
 4090   FPU    : S5;
 4091 %}
 4092 
 4093 pipe_class fp_f2i(iRegINoSp dst, fRegF src)
 4094 %{
 4095   single_instruction;
 4096   src    : S1(read);
 4097   dst    : S5(write);
 4098   DECODE : ID;
 4099   FPU    : S5;
 4100 %}
 4101 
 4102 pipe_class fp_f2l(iRegLNoSp dst, fRegF src)
 4103 %{
 4104   single_instruction;
 4105   src    : S1(read);
 4106   dst    : S5(write);
 4107   DECODE : ID;
 4108   FPU    : S5;
 4109 %}
 4110 
 4111 pipe_class fp_i2f(fRegF dst, iRegIorL2I src)
 4112 %{
 4113   single_instruction;
 4114   src    : S1(read);
 4115   dst    : S5(write);
 4116   DECODE : ID;
 4117   FPU    : S5;
 4118 %}
 4119 
 4120 pipe_class fp_l2f(fRegF dst, iRegL src)
 4121 %{
 4122   single_instruction;
 4123   src    : S1(read);
 4124   dst    : S5(write);
 4125   DECODE : ID;
 4126   FPU    : S5;
 4127 %}
 4128 
 4129 pipe_class fp_d2i(iRegINoSp dst, fRegD src)
 4130 %{
 4131   single_instruction;
 4132   src    : S1(read);
 4133   dst    : S5(write);
 4134   DECODE : ID;
 4135   FPU    : S5;
 4136 %}
 4137 
 4138 pipe_class fp_d2l(iRegLNoSp dst, fRegD src)
 4139 %{
 4140   single_instruction;
 4141   src    : S1(read);
 4142   dst    : S5(write);
 4143   DECODE : ID;
 4144   FPU    : S5;
 4145 %}
 4146 
 4147 pipe_class fp_i2d(fRegD dst, iRegIorL2I src)
 4148 %{
 4149   single_instruction;
 4150   src    : S1(read);
 4151   dst    : S5(write);
 4152   DECODE : ID;
 4153   FPU    : S5;
 4154 %}
 4155 
 4156 pipe_class fp_l2d(fRegD dst, iRegIorL2I src)
 4157 %{
 4158   single_instruction;
 4159   src    : S1(read);
 4160   dst    : S5(write);
 4161   DECODE : ID;
 4162   FPU    : S5;
 4163 %}
 4164 
 4165 pipe_class fp_div_s(fRegF dst, fRegF src1, fRegF src2)
 4166 %{
 4167   single_instruction;
 4168   src1   : S1(read);
 4169   src2   : S2(read);
 4170   dst    : S5(write);
 4171   DECODE : ID;
 4172   FPU    : S5;
 4173 %}
 4174 
 4175 pipe_class fp_div_d(fRegD dst, fRegD src1, fRegD src2)
 4176 %{
 4177   single_instruction;
 4178   src1   : S1(read);
 4179   src2   : S2(read);
 4180   dst    : S5(write);
 4181   DECODE : ID;
 4182   FPU    : S5;
 4183 %}
 4184 
 4185 pipe_class fp_sqrt_s(fRegF dst, fRegF src1, fRegF src2)
 4186 %{
 4187   single_instruction;
 4188   src1   : S1(read);
 4189   src2   : S2(read);
 4190   dst    : S5(write);
 4191   DECODE : ID;
 4192   FPU    : S5;
 4193 %}
 4194 
 4195 pipe_class fp_sqrt_d(fRegD dst, fRegD src1, fRegD src2)
 4196 %{
 4197   single_instruction;
 4198   src1   : S1(read);
 4199   src2   : S2(read);
 4200   dst    : S5(write);
 4201   DECODE : ID;
 4202   FPU    : S5;
 4203 %}
 4204 
 4205 pipe_class fp_load_constant_s(fRegF dst)
 4206 %{
 4207   single_instruction;
 4208   dst    : S5(write);
 4209   DECODE : ID;
 4210   FPU    : S5;
 4211 %}
 4212 
 4213 pipe_class fp_load_constant_d(fRegD dst)
 4214 %{
 4215   single_instruction;
 4216   dst    : S5(write);
 4217   DECODE : ID;
 4218   FPU    : S5;
 4219 %}
 4220 
 4221 pipe_class fp_load_mem_s(fRegF dst, memory mem)
 4222 %{
 4223   single_instruction;
 4224   mem    : S1(read);
 4225   dst    : S5(write);
 4226   DECODE : ID;
 4227   LDST   : MEM;
 4228 %}
 4229 
 4230 pipe_class fp_load_mem_d(fRegD dst, memory mem)
 4231 %{
 4232   single_instruction;
 4233   mem    : S1(read);
 4234   dst    : S5(write);
 4235   DECODE : ID;
 4236   LDST   : MEM;
 4237 %}
 4238 
 4239 pipe_class fp_store_reg_s(fRegF src, memory mem)
 4240 %{
 4241   single_instruction;
 4242   src    : S1(read);
 4243   mem    : S5(write);
 4244   DECODE : ID;
 4245   LDST   : MEM;
 4246 %}
 4247 
 4248 pipe_class fp_store_reg_d(fRegD src, memory mem)
 4249 %{
 4250   single_instruction;
 4251   src    : S1(read);
 4252   mem    : S5(write);
 4253   DECODE : ID;
 4254   LDST   : MEM;
 4255 %}
 4256 
 4257 //------- Integer ALU operations --------------------------
 4258 
 4259 // Integer ALU reg-reg operation
// Operands needed in ID, result generated in EX
 4261 // E.g.  ADD   Rd, Rs1, Rs2
 4262 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4263 %{
 4264   single_instruction;
 4265   dst    : EX(write);
 4266   src1   : ID(read);
 4267   src2   : ID(read);
 4268   DECODE : ID;
 4269   ALU    : EX;
 4270 %}
 4271 
 4272 // Integer ALU reg operation with constant shift
 4273 // E.g. SLLI    Rd, Rs1, #shift
 4274 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
 4275 %{
 4276   single_instruction;
 4277   dst    : EX(write);
 4278   src1   : ID(read);
 4279   DECODE : ID;
 4280   ALU    : EX;
 4281 %}
 4282 
 4283 // Integer ALU reg-reg operation with variable shift
 4284 // both operands must be available in ID
 4285 // E.g. SLL   Rd, Rs1, Rs2
 4286 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
 4287 %{
 4288   single_instruction;
 4289   dst    : EX(write);
 4290   src1   : ID(read);
 4291   src2   : ID(read);
 4292   DECODE : ID;
 4293   ALU    : EX;
 4294 %}
 4295 
 4296 // Integer ALU reg operation
 4297 // E.g. NEG   Rd, Rs2
 4298 pipe_class ialu_reg(iRegI dst, iRegI src)
 4299 %{
 4300   single_instruction;
 4301   dst    : EX(write);
 4302   src    : ID(read);
 4303   DECODE : ID;
 4304   ALU    : EX;
 4305 %}
 4306 
 4307 // Integer ALU reg immediate operation
 4308 // E.g. ADDI   Rd, Rs1, #imm
 4309 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
 4310 %{
 4311   single_instruction;
 4312   dst    : EX(write);
 4313   src1   : ID(read);
 4314   DECODE : ID;
 4315   ALU    : EX;
 4316 %}
 4317 
 4318 // Integer ALU immediate operation (no source operands)
 4319 // E.g. LI    Rd, #imm
 4320 pipe_class ialu_imm(iRegI dst)
 4321 %{
 4322   single_instruction;
 4323   dst    : EX(write);
 4324   DECODE : ID;
 4325   ALU    : EX;
 4326 %}
 4327 
 4328 //------- Multiply pipeline operations --------------------
 4329 
 4330 // Multiply reg-reg
 4331 // E.g. MULW   Rd, Rs1, Rs2
 4332 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4333 %{
 4334   single_instruction;
 4335   dst    : WR(write);
 4336   src1   : ID(read);
 4337   src2   : ID(read);
 4338   DECODE : ID;
 4339   MUL    : WR;
 4340 %}
 4341 
 4342 // E.g. MUL   RD, Rs1, Rs2
 4343 pipe_class lmul_reg_reg(iRegL dst, iRegL src1, iRegL src2)
 4344 %{
 4345   single_instruction;
 4346   fixed_latency(3); // Maximum latency for 64 bit mul
 4347   dst    : WR(write);
 4348   src1   : ID(read);
 4349   src2   : ID(read);
 4350   DECODE : ID;
 4351   MUL    : WR;
 4352 %}
 4353 
 4354 //------- Divide pipeline operations --------------------
 4355 
 4356 // E.g. DIVW   Rd, Rs1, Rs2
 4357 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4358 %{
 4359   single_instruction;
 4360   fixed_latency(8); // Maximum latency for 32 bit divide
 4361   dst    : WR(write);
 4362   src1   : ID(read);
 4363   src2   : ID(read);
 4364   DECODE : ID;
 4365   DIV    : WR;
 4366 %}
 4367 
 4368 // E.g. DIV   RD, Rs1, Rs2
 4369 pipe_class ldiv_reg_reg(iRegL dst, iRegL src1, iRegL src2)
 4370 %{
 4371   single_instruction;
 4372   fixed_latency(16); // Maximum latency for 64 bit divide
 4373   dst    : WR(write);
 4374   src1   : ID(read);
 4375   src2   : ID(read);
 4376   DECODE : ID;
 4377   DIV    : WR;
 4378 %}
 4379 
 4380 //------- Load pipeline operations ------------------------
 4381 
 4382 // Load - prefetch
// E.g. PREFETCH_W  mem
 4384 pipe_class iload_prefetch(memory mem)
 4385 %{
 4386   single_instruction;
 4387   mem    : ID(read);
 4388   DECODE : ID;
 4389   LDST   : MEM;
 4390 %}
 4391 
 4392 // Load - reg, mem
 4393 // E.g. LA    Rd, mem
 4394 pipe_class iload_reg_mem(iRegI dst, memory mem)
 4395 %{
 4396   single_instruction;
 4397   dst    : WR(write);
 4398   mem    : ID(read);
 4399   DECODE : ID;
 4400   LDST   : MEM;
 4401 %}
 4402 
 4403 // Load - reg, reg
 4404 // E.g. LD    Rd, Rs
 4405 pipe_class iload_reg_reg(iRegI dst, iRegI src)
 4406 %{
 4407   single_instruction;
 4408   dst    : WR(write);
 4409   src    : ID(read);
 4410   DECODE : ID;
 4411   LDST   : MEM;
 4412 %}
 4413 
 4414 //------- Store pipeline operations -----------------------
 4415 
 4416 // Store - zr, mem
 4417 // E.g. SD    zr, mem
 4418 pipe_class istore_mem(memory mem)
 4419 %{
 4420   single_instruction;
 4421   mem    : ID(read);
 4422   DECODE : ID;
 4423   LDST   : MEM;
 4424 %}
 4425 
 4426 // Store - reg, mem
 4427 // E.g. SD    Rs, mem
 4428 pipe_class istore_reg_mem(iRegI src, memory mem)
 4429 %{
 4430   single_instruction;
 4431   mem    : ID(read);
 4432   src    : EX(read);
 4433   DECODE : ID;
 4434   LDST   : MEM;
 4435 %}
 4436 
 4437 // Store - reg, reg
 4438 // E.g. SD    Rs2, Rs1
 4439 pipe_class istore_reg_reg(iRegI dst, iRegI src)
 4440 %{
 4441   single_instruction;
 4442   dst    : ID(read);
 4443   src    : EX(read);
 4444   DECODE : ID;
 4445   LDST   : MEM;
 4446 %}
 4447 
 4448 //------- Control transfer pipeline operations ------------
 4449 
 4450 // Branch
 4451 pipe_class pipe_branch()
 4452 %{
 4453   single_instruction;
 4454   DECODE : ID;
 4455   BRANCH : EX;
 4456 %}
 4457 
 4458 // Branch
 4459 pipe_class pipe_branch_reg(iRegI src)
 4460 %{
 4461   single_instruction;
 4462   src    : ID(read);
 4463   DECODE : ID;
 4464   BRANCH : EX;
 4465 %}
 4466 
 4467 // Compare & Branch
 4468 // E.g. BEQ   Rs1, Rs2, L
 4469 pipe_class pipe_cmp_branch(iRegI src1, iRegI src2)
 4470 %{
 4471   single_instruction;
 4472   src1   : ID(read);
 4473   src2   : ID(read);
 4474   DECODE : ID;
 4475   BRANCH : EX;
 4476 %}
 4477 
 4478 // E.g. BEQZ Rs, L
 4479 pipe_class pipe_cmpz_branch(iRegI src)
 4480 %{
 4481   single_instruction;
 4482   src    : ID(read);
 4483   DECODE : ID;
 4484   BRANCH : EX;
 4485 %}
 4486 
 4487 //------- Synchronisation operations ----------------------
 4488 // Any operation requiring serialization
 4489 // E.g. FENCE/Atomic Ops/Load Acquire/Store Release
 4490 pipe_class pipe_serial()
 4491 %{
 4492   single_instruction;
 4493   force_serialization;
 4494   fixed_latency(16);
 4495   DECODE : ID;
 4496   LDST   : MEM;
 4497 %}
 4498 
 4499 pipe_class pipe_slow()
 4500 %{
 4501   instruction_count(10);
 4502   multiple_bundles;
 4503   force_serialization;
 4504   fixed_latency(16);
 4505   DECODE : ID;
 4506   LDST   : MEM;
 4507 %}
 4508 
 4509 // Empty pipeline class
 4510 pipe_class pipe_class_empty()
 4511 %{
 4512   single_instruction;
 4513   fixed_latency(0);
 4514 %}
 4515 
 4516 // Default pipeline class.
 4517 pipe_class pipe_class_default()
 4518 %{
 4519   single_instruction;
 4520   fixed_latency(2);
 4521 %}
 4522 
 4523 // Pipeline class for compares.
 4524 pipe_class pipe_class_compare()
 4525 %{
 4526   single_instruction;
 4527   fixed_latency(16);
 4528 %}
 4529 
 4530 // Pipeline class for memory operations.
 4531 pipe_class pipe_class_memory()
 4532 %{
 4533   single_instruction;
 4534   fixed_latency(16);
 4535 %}
 4536 
 4537 // Pipeline class for call.
 4538 pipe_class pipe_class_call()
 4539 %{
 4540   single_instruction;
 4541   fixed_latency(100);
 4542 %}
 4543 
 4544 // Define the class for the Nop node.
 4545 define %{
 4546    MachNop = pipe_class_empty;
 4547 %}
 4548 %}
 4549 //----------INSTRUCTIONS-------------------------------------------------------
 4550 //
 4551 // match      -- States which machine-independent subtree may be replaced
 4552 //               by this instruction.
 4553 // ins_cost   -- The estimated cost of this instruction is used by instruction
 4554 //               selection to identify a minimum cost tree of machine
 4555 //               instructions that matches a tree of machine-independent
 4556 //               instructions.
 4557 // format     -- A string providing the disassembly for this instruction.
 4558 //               The value of an instruction's operand may be inserted
 4559 //               by referring to it with a '$' prefix.
 4560 // opcode     -- Three instruction opcodes may be provided.  These are referred
 4561 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 4563 //               indicate the type of machine instruction, while secondary
 4564 //               and tertiary are often used for prefix options or addressing
 4565 //               modes.
 4566 // ins_encode -- A list of encode classes with parameters. The encode class
 4567 //               name must have been defined in an 'enc_class' specification
 4568 //               in the encode section of the architecture description.
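//
// As a concrete illustration, in the loadB rule further below "$dst$$reg" and
// "$mem$$base"/"$mem$$disp" are ADLC substitutions that expand to the register
// assigned to the dst operand and to the base/displacement fields of the
// matched memory operand, so its ins_encode boils down to something like
// "__ lb(x7, Address(x5, 12))" for one particular match (the register numbers
// and offset here are made-up examples).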
 4569 
 4570 // ============================================================================
 4571 // Memory (Load/Store) Instructions
 4572 
 4573 // Load Instructions
 4574 
 4575 // Load Byte (8 bit signed)
 4576 instruct loadB(iRegINoSp dst, memory mem)
 4577 %{
 4578   match(Set dst (LoadB mem));
 4579 
 4580   ins_cost(LOAD_COST);
 4581   format %{ "lb  $dst, $mem\t# byte, #@loadB" %}
 4582 
 4583   ins_encode %{
 4584     __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4585   %}
 4586 
 4587   ins_pipe(iload_reg_mem);
 4588 %}
 4589 
 4590 // Load Byte (8 bit signed) into long
 4591 instruct loadB2L(iRegLNoSp dst, memory mem)
 4592 %{
 4593   match(Set dst (ConvI2L (LoadB mem)));
 4594 
 4595   ins_cost(LOAD_COST);
 4596   format %{ "lb  $dst, $mem\t# byte, #@loadB2L" %}
 4597 
 4598   ins_encode %{
 4599     __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4600   %}
 4601 
 4602   ins_pipe(iload_reg_mem);
 4603 %}
 4604 
 4605 // Load Byte (8 bit unsigned)
 4606 instruct loadUB(iRegINoSp dst, memory mem)
 4607 %{
 4608   match(Set dst (LoadUB mem));
 4609 
 4610   ins_cost(LOAD_COST);
 4611   format %{ "lbu  $dst, $mem\t# byte, #@loadUB" %}
 4612 
 4613   ins_encode %{
 4614     __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4615   %}
 4616 
 4617   ins_pipe(iload_reg_mem);
 4618 %}
 4619 
 4620 // Load Byte (8 bit unsigned) into long
 4621 instruct loadUB2L(iRegLNoSp dst, memory mem)
 4622 %{
 4623   match(Set dst (ConvI2L (LoadUB mem)));
 4624 
 4625   ins_cost(LOAD_COST);
 4626   format %{ "lbu  $dst, $mem\t# byte, #@loadUB2L" %}
 4627 
 4628   ins_encode %{
 4629     __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4630   %}
 4631 
 4632   ins_pipe(iload_reg_mem);
 4633 %}
 4634 
 4635 // Load Short (16 bit signed)
 4636 instruct loadS(iRegINoSp dst, memory mem)
 4637 %{
 4638   match(Set dst (LoadS mem));
 4639 
 4640   ins_cost(LOAD_COST);
 4641   format %{ "lh  $dst, $mem\t# short, #@loadS" %}
 4642 
 4643   ins_encode %{
 4644     __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4645   %}
 4646 
 4647   ins_pipe(iload_reg_mem);
 4648 %}
 4649 
 4650 // Load Short (16 bit signed) into long
 4651 instruct loadS2L(iRegLNoSp dst, memory mem)
 4652 %{
 4653   match(Set dst (ConvI2L (LoadS mem)));
 4654 
 4655   ins_cost(LOAD_COST);
 4656   format %{ "lh  $dst, $mem\t# short, #@loadS2L" %}
 4657 
 4658   ins_encode %{
 4659     __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4660   %}
 4661 
 4662   ins_pipe(iload_reg_mem);
 4663 %}
 4664 
 4665 // Load Char (16 bit unsigned)
 4666 instruct loadUS(iRegINoSp dst, memory mem)
 4667 %{
 4668   match(Set dst (LoadUS mem));
 4669 
 4670   ins_cost(LOAD_COST);
 4671   format %{ "lhu  $dst, $mem\t# short, #@loadUS" %}
 4672 
 4673   ins_encode %{
 4674     __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4675   %}
 4676 
 4677   ins_pipe(iload_reg_mem);
 4678 %}
 4679 
 4680 // Load Short/Char (16 bit unsigned) into long
 4681 instruct loadUS2L(iRegLNoSp dst, memory mem)
 4682 %{
 4683   match(Set dst (ConvI2L (LoadUS mem)));
 4684 
 4685   ins_cost(LOAD_COST);
 4686   format %{ "lhu  $dst, $mem\t# short, #@loadUS2L" %}
 4687 
 4688   ins_encode %{
 4689     __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4690   %}
 4691 
 4692   ins_pipe(iload_reg_mem);
 4693 %}
 4694 
 4695 // Load Integer (32 bit signed)
 4696 instruct loadI(iRegINoSp dst, memory mem)
 4697 %{
 4698   match(Set dst (LoadI mem));
 4699 
 4700   ins_cost(LOAD_COST);
 4701   format %{ "lw  $dst, $mem\t# int, #@loadI" %}
 4702 
 4703   ins_encode %{
 4704     __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4705   %}
 4706 
 4707   ins_pipe(iload_reg_mem);
 4708 %}
 4709 
 4710 // Load Integer (32 bit signed) into long
 4711 instruct loadI2L(iRegLNoSp dst, memory mem)
 4712 %{
 4713   match(Set dst (ConvI2L (LoadI mem)));
 4714 
 4715   ins_cost(LOAD_COST);
 4716   format %{ "lw  $dst, $mem\t# int, #@loadI2L" %}
 4717 
 4718   ins_encode %{
 4719     __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4720   %}
 4721 
 4722   ins_pipe(iload_reg_mem);
 4723 %}
 4724 
 4725 // Load Integer (32 bit unsigned) into long
 4726 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
 4727 %{
 4728   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 4729 
 4730   ins_cost(LOAD_COST);
 4731   format %{ "lwu  $dst, $mem\t# int, #@loadUI2L" %}
 4732 
 4733   ins_encode %{
 4734     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4735   %}
 4736 
 4737   ins_pipe(iload_reg_mem);
 4738 %}
 4739 
 4740 // Load Long (64 bit signed)
 4741 instruct loadL(iRegLNoSp dst, memory mem)
 4742 %{
 4743   match(Set dst (LoadL mem));
 4744 
 4745   ins_cost(LOAD_COST);
  format %{ "ld  $dst, $mem\t# long, #@loadL" %}
 4747 
 4748   ins_encode %{
 4749     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4750   %}
 4751 
 4752   ins_pipe(iload_reg_mem);
 4753 %}
 4754 
 4755 // Load Range
 4756 instruct loadRange(iRegINoSp dst, memory mem)
 4757 %{
 4758   match(Set dst (LoadRange mem));
 4759 
 4760   ins_cost(LOAD_COST);
 4761   format %{ "lwu  $dst, $mem\t# range, #@loadRange" %}
 4762 
 4763   ins_encode %{
 4764     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4765   %}
 4766 
 4767   ins_pipe(iload_reg_mem);
 4768 %}
 4769 
 4770 // Load Pointer
 4771 instruct loadP(iRegPNoSp dst, memory mem)
 4772 %{
 4773   match(Set dst (LoadP mem));
 4774   predicate(n->as_Load()->barrier_data() == 0);
 4775 
 4776   ins_cost(LOAD_COST);
 4777   format %{ "ld  $dst, $mem\t# ptr, #@loadP" %}
 4778 
 4779   ins_encode %{
 4780     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4781   %}
 4782 
 4783   ins_pipe(iload_reg_mem);
 4784 %}
 4785 
 4786 // Load Compressed Pointer
 4787 instruct loadN(iRegNNoSp dst, memory mem)
 4788 %{
 4789   predicate(n->as_Load()->barrier_data() == 0);
 4790   match(Set dst (LoadN mem));
 4791 
 4792   ins_cost(LOAD_COST);
 4793   format %{ "lwu  $dst, $mem\t# loadN, compressed ptr, #@loadN" %}
 4794 
 4795   ins_encode %{
 4796     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4797   %}
 4798 
 4799   ins_pipe(iload_reg_mem);
 4800 %}
 4801 
 4802 // Load Klass Pointer
 4803 instruct loadKlass(iRegPNoSp dst, memory mem)
 4804 %{
 4805   match(Set dst (LoadKlass mem));
 4806 
 4807   ins_cost(LOAD_COST);
 4808   format %{ "ld  $dst, $mem\t# class, #@loadKlass" %}
 4809 
 4810   ins_encode %{
 4811     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4812   %}
 4813 
 4814   ins_pipe(iload_reg_mem);
 4815 %}
 4816 
 4817 // Load Narrow Klass Pointer
 4818 instruct loadNKlass(iRegNNoSp dst, memory mem)
 4819 %{
 4820   match(Set dst (LoadNKlass mem));
 4821 
 4822   ins_cost(LOAD_COST);
 4823   format %{ "lwu  $dst, $mem\t# loadNKlass, compressed class ptr, #@loadNKlass" %}
 4824 
 4825   ins_encode %{
 4826     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4827   %}
 4828 
 4829   ins_pipe(iload_reg_mem);
 4830 %}
 4831 
 4832 // Load Float
 4833 instruct loadF(fRegF dst, memory mem)
 4834 %{
 4835   match(Set dst (LoadF mem));
 4836 
 4837   ins_cost(LOAD_COST);
 4838   format %{ "flw  $dst, $mem\t# float, #@loadF" %}
 4839 
 4840   ins_encode %{
 4841     __ flw(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4842   %}
 4843 
 4844   ins_pipe(fp_load_mem_s);
 4845 %}
 4846 
 4847 // Load Double
 4848 instruct loadD(fRegD dst, memory mem)
 4849 %{
 4850   match(Set dst (LoadD mem));
 4851 
 4852   ins_cost(LOAD_COST);
 4853   format %{ "fld  $dst, $mem\t# double, #@loadD" %}
 4854 
 4855   ins_encode %{
 4856     __ fld(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4857   %}
 4858 
 4859   ins_pipe(fp_load_mem_d);
 4860 %}
 4861 
 4862 // Load Int Constant
 4863 instruct loadConI(iRegINoSp dst, immI src)
 4864 %{
 4865   match(Set dst src);
 4866 
 4867   ins_cost(ALU_COST);
 4868   format %{ "mv $dst, $src\t# int, #@loadConI" %}
 4869 
 4870   ins_encode(riscv_enc_mov_imm(dst, src));
 4871 
 4872   ins_pipe(ialu_imm);
 4873 %}
 4874 
 4875 // Load Long Constant
 4876 instruct loadConL(iRegLNoSp dst, immL src)
 4877 %{
 4878   match(Set dst src);
 4879 
 4880   ins_cost(ALU_COST);
 4881   format %{ "mv $dst, $src\t# long, #@loadConL" %}
 4882 
 4883   ins_encode(riscv_enc_mov_imm(dst, src));
 4884 
 4885   ins_pipe(ialu_imm);
 4886 %}
 4887 
 4888 // Load Pointer Constant
 4889 instruct loadConP(iRegPNoSp dst, immP con)
 4890 %{
 4891   match(Set dst con);
 4892 
 4893   ins_cost(ALU_COST);
 4894   format %{ "mv  $dst, $con\t# ptr, #@loadConP" %}
 4895 
 4896   ins_encode(riscv_enc_mov_p(dst, con));
 4897 
 4898   ins_pipe(ialu_imm);
 4899 %}
 4900 
 4901 // Load Null Pointer Constant
 4902 instruct loadConP0(iRegPNoSp dst, immP0 con)
 4903 %{
 4904   match(Set dst con);
 4905 
 4906   ins_cost(ALU_COST);
 4907   format %{ "mv  $dst, $con\t# null pointer, #@loadConP0" %}
 4908 
 4909   ins_encode(riscv_enc_mov_zero(dst));
 4910 
 4911   ins_pipe(ialu_imm);
 4912 %}
 4913 
 4914 // Load Pointer Constant One
 4915 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 4916 %{
 4917   match(Set dst con);
 4918 
 4919   ins_cost(ALU_COST);
 4920   format %{ "mv  $dst, $con\t# load ptr constant one, #@loadConP1" %}
 4921 
 4922   ins_encode(riscv_enc_mov_p1(dst));
 4923 
 4924   ins_pipe(ialu_imm);
 4925 %}
 4926 
 4927 // Load Byte Map Base Constant
 4928 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
 4929 %{
 4930   match(Set dst con);
 4931   ins_cost(ALU_COST);
 4932   format %{ "mv  $dst, $con\t# Byte Map Base, #@loadByteMapBase" %}
 4933 
 4934   ins_encode(riscv_enc_mov_byte_map_base(dst));
 4935 
 4936   ins_pipe(ialu_imm);
 4937 %}
 4938 
 4939 // Load Narrow Pointer Constant
 4940 instruct loadConN(iRegNNoSp dst, immN con)
 4941 %{
 4942   match(Set dst con);
 4943 
 4944   ins_cost(ALU_COST * 4);
 4945   format %{ "mv  $dst, $con\t# compressed ptr, #@loadConN" %}
 4946 
 4947   ins_encode(riscv_enc_mov_n(dst, con));
 4948 
 4949   ins_pipe(ialu_imm);
 4950 %}
 4951 
 4952 // Load Narrow Null Pointer Constant
 4953 instruct loadConN0(iRegNNoSp dst, immN0 con)
 4954 %{
 4955   match(Set dst con);
 4956 
 4957   ins_cost(ALU_COST);
 4958   format %{ "mv  $dst, $con\t# compressed null pointer, #@loadConN0" %}
 4959 
 4960   ins_encode(riscv_enc_mov_zero(dst));
 4961 
 4962   ins_pipe(ialu_imm);
 4963 %}
 4964 
 4965 // Load Narrow Klass Constant
 4966 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
 4967 %{
 4968   match(Set dst con);
 4969 
 4970   ins_cost(ALU_COST * 6);
 4971   format %{ "mv  $dst, $con\t# compressed klass ptr, #@loadConNKlass" %}
 4972 
 4973   ins_encode(riscv_enc_mov_nk(dst, con));
 4974 
 4975   ins_pipe(ialu_imm);
 4976 %}
 4977 
 4978 // Load Float Constant
 4979 instruct loadConF(fRegF dst, immF con) %{
 4980   match(Set dst con);
 4981 
 4982   ins_cost(LOAD_COST);
 4983   format %{
 4984     "flw $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConF"
 4985   %}
 4986 
 4987   ins_encode %{
 4988     __ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
 4989   %}
 4990 
 4991   ins_pipe(fp_load_constant_s);
 4992 %}
 4993 
 4994 instruct loadConF0(fRegF dst, immF0 con) %{
 4995   match(Set dst con);
 4996 
 4997   ins_cost(XFER_COST);
 4998 
 4999   format %{ "fmv.w.x $dst, zr\t# float, #@loadConF0" %}
 5000 
 5001   ins_encode %{
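    // The all-zero bit pattern is +0.0f, so moving the integer zero register
    // into the FP register is sufficient; no constant-table load is needed.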
 5002     __ fmv_w_x(as_FloatRegister($dst$$reg), zr);
 5003   %}
 5004 
 5005   ins_pipe(fp_load_constant_s);
 5006 %}
 5007 
 5008 // Load Double Constant
 5009 instruct loadConD(fRegD dst, immD con) %{
 5010   match(Set dst con);
 5011 
 5012   ins_cost(LOAD_COST);
 5013   format %{
 5014     "fld $dst, [$constantaddress]\t# load from constant table: double=$con, #@loadConD"
 5015   %}
 5016 
 5017   ins_encode %{
 5018     __ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
 5019   %}
 5020 
 5021   ins_pipe(fp_load_constant_d);
 5022 %}
 5023 
 5024 instruct loadConD0(fRegD dst, immD0 con) %{
 5025   match(Set dst con);
 5026 
 5027   ins_cost(XFER_COST);
 5028 
 5029   format %{ "fmv.d.x $dst, zr\t# double, #@loadConD0" %}
 5030 
 5031   ins_encode %{
 5032     __ fmv_d_x(as_FloatRegister($dst$$reg), zr);
 5033   %}
 5034 
 5035   ins_pipe(fp_load_constant_d);
 5036 %}
 5037 
 5038 // Store Instructions
 5039 // Store CMS card-mark Immediate
 5040 instruct storeimmCM0(immI0 zero, memory mem)
 5041 %{
 5042   match(Set mem (StoreCM mem zero));
 5043 
 5044   ins_cost(STORE_COST);
 5045   format %{ "storestore (elided)\n\t"
 5046             "sb zr, $mem\t# byte, #@storeimmCM0" %}
 5047 
 5048   ins_encode %{
 5049     __ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
 5050   %}
 5051 
 5052   ins_pipe(istore_mem);
 5053 %}
 5054 
 5055 // Store CMS card-mark Immediate with intervening StoreStore
 5056 // needed when using CMS with no conditional card marking
 5057 instruct storeimmCM0_ordered(immI0 zero, memory mem)
 5058 %{
 5059   match(Set mem (StoreCM mem zero));
 5060 
 5061   ins_cost(ALU_COST + STORE_COST);
 5062   format %{ "membar(StoreStore)\n\t"
 5063             "sb zr, $mem\t# byte, #@storeimmCM0_ordered" %}
 5064 
 5065   ins_encode %{
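    // Order earlier loads and stores before the card-mark store.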
 5066     __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
 5067     __ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
 5068   %}
 5069 
 5070   ins_pipe(istore_mem);
 5071 %}
 5072 
 5073 // Store Byte
 5074 instruct storeB(iRegIorL2I src, memory mem)
 5075 %{
 5076   match(Set mem (StoreB mem src));
 5077 
 5078   ins_cost(STORE_COST);
 5079   format %{ "sb  $src, $mem\t# byte, #@storeB" %}
 5080 
 5081   ins_encode %{
 5082     __ sb(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5083   %}
 5084 
 5085   ins_pipe(istore_reg_mem);
 5086 %}
 5087 
 5088 instruct storeimmB0(immI0 zero, memory mem)
 5089 %{
 5090   match(Set mem (StoreB mem zero));
 5091 
 5092   ins_cost(STORE_COST);
 5093   format %{ "sb zr, $mem\t# byte, #@storeimmB0" %}
 5094 
 5095   ins_encode %{
 5096     __ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
 5097   %}
 5098 
 5099   ins_pipe(istore_mem);
 5100 %}
 5101 
 5102 // Store Char/Short
 5103 instruct storeC(iRegIorL2I src, memory mem)
 5104 %{
 5105   match(Set mem (StoreC mem src));
 5106 
 5107   ins_cost(STORE_COST);
 5108   format %{ "sh  $src, $mem\t# short, #@storeC" %}
 5109 
 5110   ins_encode %{
 5111     __ sh(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5112   %}
 5113 
 5114   ins_pipe(istore_reg_mem);
 5115 %}
 5116 
 5117 instruct storeimmC0(immI0 zero, memory mem)
 5118 %{
 5119   match(Set mem (StoreC mem zero));
 5120 
 5121   ins_cost(STORE_COST);
 5122   format %{ "sh  zr, $mem\t# short, #@storeimmC0" %}
 5123 
 5124   ins_encode %{
 5125     __ sh(zr, Address(as_Register($mem$$base), $mem$$disp));
 5126   %}
 5127 
 5128   ins_pipe(istore_mem);
 5129 %}
 5130 
 5131 // Store Integer
 5132 instruct storeI(iRegIorL2I src, memory mem)
 5133 %{
  match(Set mem (StoreI mem src));
 5135 
 5136   ins_cost(STORE_COST);
 5137   format %{ "sw  $src, $mem\t# int, #@storeI" %}
 5138 
 5139   ins_encode %{
 5140     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5141   %}
 5142 
 5143   ins_pipe(istore_reg_mem);
 5144 %}
 5145 
 5146 instruct storeimmI0(immI0 zero, memory mem)
 5147 %{
  match(Set mem (StoreI mem zero));
 5149 
 5150   ins_cost(STORE_COST);
 5151   format %{ "sw  zr, $mem\t# int, #@storeimmI0" %}
 5152 
 5153   ins_encode %{
 5154     __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
 5155   %}
 5156 
 5157   ins_pipe(istore_mem);
 5158 %}
 5159 
 5160 // Store Long (64 bit signed)
 5161 instruct storeL(iRegL src, memory mem)
 5162 %{
 5163   match(Set mem (StoreL mem src));
 5164 
 5165   ins_cost(STORE_COST);
 5166   format %{ "sd  $src, $mem\t# long, #@storeL" %}
 5167 
 5168   ins_encode %{
 5169     __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5170   %}
 5171 
 5172   ins_pipe(istore_reg_mem);
 5173 %}
 5174 
 5175 // Store Long (64 bit signed)
 5176 instruct storeimmL0(immL0 zero, memory mem)
 5177 %{
 5178   match(Set mem (StoreL mem zero));
 5179 
 5180   ins_cost(STORE_COST);
 5181   format %{ "sd  zr, $mem\t# long, #@storeimmL0" %}
 5182 
 5183   ins_encode %{
 5184     __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
 5185   %}
 5186 
 5187   ins_pipe(istore_mem);
 5188 %}
 5189 
 5190 // Store Pointer
 5191 instruct storeP(iRegP src, memory mem)
 5192 %{
 5193   match(Set mem (StoreP mem src));
 5194   predicate(n->as_Store()->barrier_data() == 0);
 5195 
 5196   ins_cost(STORE_COST);
 5197   format %{ "sd  $src, $mem\t# ptr, #@storeP" %}
 5198 
 5199   ins_encode %{
 5200     __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5201   %}
 5202 
 5203   ins_pipe(istore_reg_mem);
 5204 %}
 5205 
 5206 // Store Pointer
 5207 instruct storeimmP0(immP0 zero, memory mem)
 5208 %{
 5209   match(Set mem (StoreP mem zero));
 5210   predicate(n->as_Store()->barrier_data() == 0);
 5211 
 5212   ins_cost(STORE_COST);
 5213   format %{ "sd zr, $mem\t# ptr, #@storeimmP0" %}
 5214 
 5215   ins_encode %{
 5216     __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
 5217   %}
 5218 
 5219   ins_pipe(istore_mem);
 5220 %}
 5221 
 5222 // Store Compressed Pointer
 5223 instruct storeN(iRegN src, memory mem)
 5224 %{
 5225   predicate(n->as_Store()->barrier_data() == 0);
 5226   match(Set mem (StoreN mem src));
 5227 
 5228   ins_cost(STORE_COST);
 5229   format %{ "sw  $src, $mem\t# compressed ptr, #@storeN" %}
 5230 
 5231   ins_encode %{
 5232     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5233   %}
 5234 
 5235   ins_pipe(istore_reg_mem);
 5236 %}
 5237 
 5238 instruct storeImmN0(immN0 zero, memory mem)
 5239 %{
 5240   predicate(n->as_Store()->barrier_data() == 0);
 5241   match(Set mem (StoreN mem zero));
 5242 
 5243   ins_cost(STORE_COST);
 5244   format %{ "sw  zr, $mem\t# compressed ptr, #@storeImmN0" %}
 5245 
 5246   ins_encode %{
 5247     __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
 5248   %}
 5249 
 5250   ins_pipe(istore_reg_mem);
 5251 %}
 5252 
 5253 // Store Float
 5254 instruct storeF(fRegF src, memory mem)
 5255 %{
 5256   match(Set mem (StoreF mem src));
 5257 
 5258   ins_cost(STORE_COST);
 5259   format %{ "fsw  $src, $mem\t# float, #@storeF" %}
 5260 
 5261   ins_encode %{
 5262     __ fsw(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5263   %}
 5264 
 5265   ins_pipe(fp_store_reg_s);
 5266 %}
 5267 
 5268 // Store Double
 5269 instruct storeD(fRegD src, memory mem)
 5270 %{
 5271   match(Set mem (StoreD mem src));
 5272 
 5273   ins_cost(STORE_COST);
 5274   format %{ "fsd  $src, $mem\t# double, #@storeD" %}
 5275 
 5276   ins_encode %{
 5277     __ fsd(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5278   %}
 5279 
 5280   ins_pipe(fp_store_reg_d);
 5281 %}
 5282 
 5283 // Store Compressed Klass Pointer
 5284 instruct storeNKlass(iRegN src, memory mem)
 5285 %{
 5286   match(Set mem (StoreNKlass mem src));
 5287 
 5288   ins_cost(STORE_COST);
 5289   format %{ "sw  $src, $mem\t# compressed klass ptr, #@storeNKlass" %}
 5290 
 5291   ins_encode %{
 5292     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5293   %}
 5294 
 5295   ins_pipe(istore_reg_mem);
 5296 %}
 5297 
 5298 // ============================================================================
 5299 // Prefetch instructions
 5300 // Must be safe to execute with invalid address (cannot fault).
 5301 
 5302 instruct prefetchalloc( memory mem ) %{
 5303   predicate(UseZicbop);
 5304   match(PrefetchAllocation mem);
 5305 
 5306   ins_cost(ALU_COST * 1);
 5307   format %{ "prefetch_w $mem\t# Prefetch for write" %}
 5308 
 5309   ins_encode %{
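    // prefetch.w encodes only a 12-bit offset whose low 5 bits are zero
    // (the offset must be a multiple of 32), so compute the address in t0
    // whenever the displacement does not meet that constraint.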
 5310     if (Assembler::is_simm12($mem$$disp)) {
 5311       if (($mem$$disp & 0x1f) == 0) {
 5312         __ prefetch_w(as_Register($mem$$base), $mem$$disp);
 5313       } else {
 5314         __ addi(t0, as_Register($mem$$base), $mem$$disp);
 5315         __ prefetch_w(t0, 0);
 5316       }
 5317     } else {
 5318       __ mv(t0, $mem$$disp);
 5319       __ add(t0, as_Register($mem$$base), t0);
 5320       __ prefetch_w(t0, 0);
 5321     }
 5322   %}
 5323 
 5324   ins_pipe(iload_prefetch);
 5325 %}
 5326 
 5327 // ============================================================================
 5328 // Atomic operation instructions
 5329 //
 5330 
 5331 // standard CompareAndSwapX when we are using barriers
 5332 // these have higher priority than the rules selected by a predicate
 5333 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5334                          iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5335 %{
 5336   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 5337 
 5338   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4);
 5339 
 5340   effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
 5341 
 5342   format %{
 5343     "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
 5344     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB"
 5345   %}
 5346 
 5347   ins_encode %{
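    // RISC-V LR/SC only operates on words and doublewords, so the byte CAS
    // is emulated with an aligned word-sized LR/SC loop; tmp1-tmp3 hold the
    // intermediate shift, mask and merged values.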
 5348     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5349                             Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
 5350                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5351   %}
 5352 
 5353   ins_pipe(pipe_slow);
 5354 %}
 5355 
 5356 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5357                          iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5358 %{
 5359   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 5360 
 5361   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4);
 5362 
 5363   effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
 5364 
 5365   format %{
 5366     "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
 5367     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS"
 5368   %}
 5369 
 5370   ins_encode %{
 5371     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5372                             Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
 5373                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5374   %}
 5375 
 5376   ins_pipe(pipe_slow);
 5377 %}
 5378 
 5379 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5380 %{
 5381   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 5382 
 5383   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5384 
 5385   format %{
 5386     "cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
 5387     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapI"
 5388   %}
 5389 
 5390   ins_encode(riscv_enc_cmpxchgw(res, mem, oldval, newval));
 5391 
 5392   ins_pipe(pipe_slow);
 5393 %}
 5394 
 5395 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5396 %{
 5397   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 5398 
 5399   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5400 
 5401   format %{
 5402     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
 5403     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL"
 5404   %}
 5405 
 5406   ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval));
 5407 
 5408   ins_pipe(pipe_slow);
 5409 %}
 5410 
 5411 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5412 %{
 5413   predicate(n->as_LoadStore()->barrier_data() == 0);
 5414 
 5415   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 5416 
 5417   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5418 
 5419   format %{
 5420     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
 5421     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP"
 5422   %}
 5423 
 5424   ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval));
 5425 
 5426   ins_pipe(pipe_slow);
 5427 %}
 5428 
 5429 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5430 %{
 5431   predicate(n->as_LoadStore()->barrier_data() == 0);
 5432   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 5433 
 5434   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4);
 5435 
 5436   format %{
 5437     "cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
 5438     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN"
 5439   %}
 5440 
 5441   ins_encode(riscv_enc_cmpxchgn(res, mem, oldval, newval));
 5442 
 5443   ins_pipe(pipe_slow);
 5444 %}
 5445 
 5446 // alternative CompareAndSwapX when we are eliding barriers
 5447 instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5448                             iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5449 %{
 5450   predicate(needs_acquiring_load_reserved(n));
 5451 
 5452   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 5453 
 5454   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4);
 5455 
 5456   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5457 
 5458   format %{
 5459     "cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
 5460     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq"
 5461   %}
 5462 
 5463   ins_encode %{
 5464     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5465                             Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
 5466                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5467   %}
 5468 
 5469   ins_pipe(pipe_slow);
 5470 %}
 5471 
 5472 instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5473                             iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5474 %{
 5475   predicate(needs_acquiring_load_reserved(n));
 5476 
 5477   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 5478 
 5479   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4);
 5480 
 5481   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5482 
 5483   format %{
 5484     "cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
 5485     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq"
 5486   %}
 5487 
 5488   ins_encode %{
 5489     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5490                             Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
 5491                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5492   %}
 5493 
 5494   ins_pipe(pipe_slow);
 5495 %}
 5496 
 5497 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5498 %{
 5499   predicate(needs_acquiring_load_reserved(n));
 5500 
 5501   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 5502 
 5503   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5504 
 5505   format %{
 5506     "cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
 5507     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq"
 5508   %}
 5509 
 5510   ins_encode(riscv_enc_cmpxchgw_acq(res, mem, oldval, newval));
 5511 
 5512   ins_pipe(pipe_slow);
 5513 %}
 5514 
 5515 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5516 %{
 5517   predicate(needs_acquiring_load_reserved(n));
 5518 
 5519   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 5520 
 5521   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5522 
 5523   format %{
 5524     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
 5525     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq"
 5526   %}
 5527 
 5528   ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval));
 5529 
 5530   ins_pipe(pipe_slow);
 5531 %}
 5532 
 5533 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5534 %{
 5535   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 5536 
 5537   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 5538 
 5539   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5540 
 5541   format %{
 5542     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
 5543     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq"
 5544   %}
 5545 
 5546   ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval));
 5547 
 5548   ins_pipe(pipe_slow);
 5549 %}
 5550 
 5551 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5552 %{
 5553   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5554 
 5555   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 5556 
 5557   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4);
 5558 
 5559   format %{
 5560     "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
 5561     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq"
 5562   %}
 5563 
 5564   ins_encode(riscv_enc_cmpxchgn_acq(res, mem, oldval, newval));
 5565 
 5566   ins_pipe(pipe_slow);
 5567 %}
 5568 
 5569 // Sundry CAS operations.  Note that release is always true,
 5570 // regardless of the memory ordering of the CAS.  This is because we
 5571 // need the volatile case to be sequentially consistent but there is
 5572 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 5573 // can't check the type of memory ordering here, so we always emit a
 5574 // sc_d(w) with rl bit set.
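// Unlike the CompareAndSwap rules above, which produce a success flag, the
// CompareAndExchange rules below return the value previously held in memory.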
 5575 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5576                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5577 %{
 5578   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5579 
 5580   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5);
 5581 
 5582   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5583 
 5584   format %{
 5585     "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB"
 5586   %}
 5587 
 5588   ins_encode %{
 5589     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5590                             /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5591                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5592   %}
 5593 
 5594   ins_pipe(pipe_slow);
 5595 %}
 5596 
 5597 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5598                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5599 %{
 5600   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5601 
 5602   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6);
 5603 
 5604   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5605 
 5606   format %{
 5607     "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS"
 5608   %}
 5609 
 5610   ins_encode %{
 5611     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5612                             /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5613                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5614   %}
 5615 
 5616   ins_pipe(pipe_slow);
 5617 %}
 5618 
 5619 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5620 %{
 5621   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 5622 
 5623   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5624 
 5625   effect(TEMP_DEF res);
 5626 
 5627   format %{
 5628     "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI"
 5629   %}
 5630 
 5631   ins_encode %{
 5632     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5633                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5634   %}
 5635 
 5636   ins_pipe(pipe_slow);
 5637 %}
 5638 
 5639 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
 5640 %{
 5641   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 5642 
 5643   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5644 
 5645   effect(TEMP_DEF res);
 5646 
 5647   format %{
 5648     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL"
 5649   %}
 5650 
 5651   ins_encode %{
 5652     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5653                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5654   %}
 5655 
 5656   ins_pipe(pipe_slow);
 5657 %}
 5658 
 5659 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
 5660 %{
 5661   predicate(n->as_LoadStore()->barrier_data() == 0);
 5662   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 5663 
 5664   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 3);
 5665 
 5666   effect(TEMP_DEF res);
 5667 
 5668   format %{
 5669     "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN"
 5670   %}
 5671 
 5672   ins_encode %{
 5673     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5674                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5675   %}
 5676 
 5677   ins_pipe(pipe_slow);
 5678 %}
 5679 
 5680 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
 5681 %{
 5682   predicate(n->as_LoadStore()->barrier_data() == 0);
 5683   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 5684 
 5685   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5686 
 5687   effect(TEMP_DEF res);
 5688 
 5689   format %{
 5690     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP"
 5691   %}
 5692 
 5693   ins_encode %{
 5694     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5695                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5696   %}
 5697 
 5698   ins_pipe(pipe_slow);
 5699 %}
 5700 
 5701 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5702                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5703 %{
 5704   predicate(needs_acquiring_load_reserved(n));
 5705 
 5706   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5707 
 5708   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5);
 5709 
 5710   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5711 
 5712   format %{
 5713     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq"
 5714   %}
 5715 
 5716   ins_encode %{
 5717     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5718                             /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5719                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5720   %}
 5721 
 5722   ins_pipe(pipe_slow);
 5723 %}
 5724 
 5725 instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5726                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5727 %{
 5728   predicate(needs_acquiring_load_reserved(n));
 5729 
 5730   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5731 
 5732   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6);
 5733 
 5734   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5735 
 5736   format %{
 5737     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq"
 5738   %}
 5739 
 5740   ins_encode %{
 5741     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5742                             /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5743                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5744   %}
 5745 
 5746   ins_pipe(pipe_slow);
 5747 %}
 5748 
 5749 instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5750 %{
 5751   predicate(needs_acquiring_load_reserved(n));
 5752 
 5753   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 5754 
 5755   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5756 
 5757   effect(TEMP_DEF res);
 5758 
 5759   format %{
 5760     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq"
 5761   %}
 5762 
 5763   ins_encode %{
 5764     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5765                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5766   %}
 5767 
 5768   ins_pipe(pipe_slow);
 5769 %}
 5770 
 5771 instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
 5772 %{
 5773   predicate(needs_acquiring_load_reserved(n));
 5774 
 5775   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 5776 
 5777   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5778 
 5779   effect(TEMP_DEF res);
 5780 
 5781   format %{
 5782     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq"
 5783   %}
 5784 
 5785   ins_encode %{
 5786     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5787                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5788   %}
 5789 
 5790   ins_pipe(pipe_slow);
 5791 %}
 5792 
 5793 instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
 5794 %{
 5795   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5796 
 5797   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 5798 
 5799   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5800 
 5801   effect(TEMP_DEF res);
 5802 
 5803   format %{
 5804     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq"
 5805   %}
 5806 
 5807   ins_encode %{
 5808     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5809                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5810   %}
 5811 
 5812   ins_pipe(pipe_slow);
 5813 %}
 5814 
 5815 instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
 5816 %{
 5817   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 5818 
 5819   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 5820 
 5821   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5822 
 5823   effect(TEMP_DEF res);
 5824 
 5825   format %{
 5826     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq"
 5827   %}
 5828 
 5829   ins_encode %{
 5830     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5831                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5832   %}
 5833 
 5834   ins_pipe(pipe_slow);
 5835 %}
 5836 
 5837 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5838                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5839 %{
 5840   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 5841 
 5842   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6);
 5843 
 5844   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5845 
 5846   format %{
 5847     "cmpxchg_weak $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5848     "# $res == 1 when success, #@weakCompareAndSwapB"
 5849   %}
 5850 
 5851   ins_encode %{
 5852     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5853                                  /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5854                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5855   %}
 5856 
 5857   ins_pipe(pipe_slow);
 5858 %}
 5859 
 5860 instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5861                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5862 %{
 5863   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 5864 
 5865   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7);
 5866 
 5867   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5868 
 5869   format %{
 5870     "cmpxchg_weak $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5871     "# $res == 1 when success, #@weakCompareAndSwapS"
 5872   %}
 5873 
 5874   ins_encode %{
 5875     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5876                                  /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5877                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5878   %}
 5879 
 5880   ins_pipe(pipe_slow);
 5881 %}
 5882 
 5883 instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5884 %{
 5885   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 5886 
 5887   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5888 
 5889   format %{
 5890     "cmpxchg_weak $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5891     "# $res == 1 when success, #@weakCompareAndSwapI"
 5892   %}
 5893 
 5894   ins_encode %{
 5895     __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5896                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5897   %}
 5898 
 5899   ins_pipe(pipe_slow);
 5900 %}
 5901 
 5902 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5903 %{
 5904   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 5905 
 5906   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5907 
 5908   format %{
 5909     "cmpxchg_weak $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5910     "# $res == 1 when success, #@weakCompareAndSwapL"
 5911   %}
 5912 
 5913   ins_encode %{
 5914     __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5915                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5916   %}
 5917 
 5918   ins_pipe(pipe_slow);
 5919 %}
 5920 
 5921 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5922 %{
 5923   predicate(n->as_LoadStore()->barrier_data() == 0);
 5924   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 5925 
 5926   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4);
 5927 
 5928   format %{
 5929     "cmpxchg_weak $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5930     "# $res == 1 when success, #@weakCompareAndSwapN"
 5931   %}
 5932 
 5933   ins_encode %{
 5934     __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5935                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5936   %}
 5937 
 5938   ins_pipe(pipe_slow);
 5939 %}
 5940 
 5941 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5942 %{
 5943   predicate(n->as_LoadStore()->barrier_data() == 0);
 5944   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 5945 
 5946   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5947 
 5948   format %{
 5949     "cmpxchg_weak $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5950     "# $res == 1 when success, #@weakCompareAndSwapP"
 5951   %}
 5952 
 5953   ins_encode %{
 5954     __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5955                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5956   %}
 5957 
 5958   ins_pipe(pipe_slow);
 5959 %}
 5960 
 5961 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5962                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5963 %{
 5964   predicate(needs_acquiring_load_reserved(n));
 5965 
 5966   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 5967 
 5968   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6);
 5969 
 5970   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5971 
 5972   format %{
 5973     "cmpxchg_weak_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5974     "# $res == 1 when success, #@weakCompareAndSwapBAcq"
 5975   %}
 5976 
 5977   ins_encode %{
 5978     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5979                                  /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5980                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5981   %}
 5982 
 5983   ins_pipe(pipe_slow);
 5984 %}
 5985 
 5986 instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5987                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5988 %{
 5989   predicate(needs_acquiring_load_reserved(n));
 5990 
 5991   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 5992 
 5993   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7);
 5994 
 5995   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5996 
 5997   format %{
 5998     "cmpxchg_weak_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5999     "# $res == 1 when success, #@weakCompareAndSwapSAcq"
 6000   %}
 6001 
 6002   ins_encode %{
 6003     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 6004                                  /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 6005                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 6006   %}
 6007 
 6008   ins_pipe(pipe_slow);
 6009 %}
 6010 
 6011 instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 6012 %{
 6013   predicate(needs_acquiring_load_reserved(n));
 6014 
 6015   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 6016 
 6017   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 6018 
 6019   format %{
 6020     "cmpxchg_weak_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 6021     "# $res == 1 when success, #@weakCompareAndSwapIAcq"
 6022   %}
 6023 
 6024   ins_encode %{
 6025     __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 6026                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 6027   %}
 6028 
 6029   ins_pipe(pipe_slow);
 6030 %}
 6031 
 6032 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 6033 %{
 6034   predicate(needs_acquiring_load_reserved(n));
 6035 
 6036   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 6037 
 6038   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 6039 
 6040   format %{
 6041     "cmpxchg_weak_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 6042     "# $res == 1 when success, #@weakCompareAndSwapLAcq"
 6043   %}
 6044 
 6045   ins_encode %{
 6046     __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 6047                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 6048   %}
 6049 
 6050   ins_pipe(pipe_slow);
 6051 %}
 6052 
 6053 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 6054 %{
 6055   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 6056 
 6057   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 6058 
 6059   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4);
 6060 
 6061   format %{
 6062     "cmpxchg_weak_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 6063     "# $res == 1 when success, #@weakCompareAndSwapNAcq"
 6064   %}
 6065 
 6066   ins_encode %{
 6067     __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 6068                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 6069   %}
 6070 
 6071   ins_pipe(pipe_slow);
 6072 %}
 6073 
 6074 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 6075 %{
 6076   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 6077 
 6078   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 6079 
 6080   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 6081 
 6082   format %{
 6083     "cmpxchg_weak_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 6084     "\t# $res == 1 when success, #@weakCompareAndSwapPAcq"
 6085   %}
 6086 
 6087   ins_encode %{
 6088     __ cmpxchg_weak(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 6089                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 6090   %}
 6091 
 6092   ins_pipe(pipe_slow);
 6093 %}
 6094 
 6095 instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev)
 6096 %{
 6097   match(Set prev (GetAndSetI mem newv));
 6098 
 6099   ins_cost(ALU_COST);
 6100 
 6101   format %{ "atomic_xchgw  $prev, $newv, [$mem]\t#@get_and_setI" %}
 6102 
 6103   ins_encode %{
 6104     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6105   %}
 6106 
 6107   ins_pipe(pipe_serial);
 6108 %}
 6109 
 6110 instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev)
 6111 %{
 6112   match(Set prev (GetAndSetL mem newv));
 6113 
 6114   ins_cost(ALU_COST);
 6115 
 6116   format %{ "atomic_xchg  $prev, $newv, [$mem]\t#@get_and_setL" %}
 6117 
 6118   ins_encode %{
 6119     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6120   %}
 6121 
 6122   ins_pipe(pipe_serial);
 6123 %}
 6124 
 6125 instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev)
 6126 %{
 6127   predicate(n->as_LoadStore()->barrier_data() == 0);
 6128 
 6129   match(Set prev (GetAndSetN mem newv));
 6130 
 6131   ins_cost(ALU_COST);
 6132 
 6133   format %{ "atomic_xchgwu $prev, $newv, [$mem]\t#@get_and_setN" %}
 6134 
 6135   ins_encode %{
 6136     __ atomic_xchgwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6137   %}
 6138 
 6139   ins_pipe(pipe_serial);
 6140 %}
 6141 
 6142 instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev)
 6143 %{
 6144   predicate(n->as_LoadStore()->barrier_data() == 0);
 6145   match(Set prev (GetAndSetP mem newv));
 6146 
 6147   ins_cost(ALU_COST);
 6148 
 6149   format %{ "atomic_xchg  $prev, $newv, [$mem]\t#@get_and_setP" %}
 6150 
 6151   ins_encode %{
 6152     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6153   %}
 6154 
 6155   ins_pipe(pipe_serial);
 6156 %}
 6157 
 6158 instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev)
 6159 %{
 6160   predicate(needs_acquiring_load_reserved(n));
 6161 
 6162   match(Set prev (GetAndSetI mem newv));
 6163 
 6164   ins_cost(ALU_COST);
 6165 
 6166   format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]\t#@get_and_setIAcq" %}
 6167 
 6168   ins_encode %{
 6169     __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6170   %}
 6171 
 6172   ins_pipe(pipe_serial);
 6173 %}
 6174 
 6175 instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev)
 6176 %{
 6177   predicate(needs_acquiring_load_reserved(n));
 6178 
 6179   match(Set prev (GetAndSetL mem newv));
 6180 
 6181   ins_cost(ALU_COST);
 6182 
 6183   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]\t#@get_and_setLAcq" %}
 6184 
 6185   ins_encode %{
 6186     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6187   %}
 6188 
 6189   ins_pipe(pipe_serial);
 6190 %}
 6191 
 6192 instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev)
 6193 %{
 6194   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 6195 
 6196   match(Set prev (GetAndSetN mem newv));
 6197 
 6198   ins_cost(ALU_COST);
 6199 
 6200   format %{ "atomic_xchgwu_acq $prev, $newv, [$mem]\t#@get_and_setNAcq" %}
 6201 
 6202   ins_encode %{
 6203     __ atomic_xchgalwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6204   %}
 6205 
 6206   ins_pipe(pipe_serial);
 6207 %}
 6208 
 6209 instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev)
 6210 %{
 6211   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 6212 
 6213   match(Set prev (GetAndSetP mem newv));
 6214 
 6215   ins_cost(ALU_COST);
 6216 
 6217   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]\t#@get_and_setPAcq" %}
 6218 
 6219   ins_encode %{
 6220     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6221   %}
 6222 
 6223   ins_pipe(pipe_serial);
 6224 %}
 6225 
 6226 instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr)
 6227 %{
 6228   match(Set newval (GetAndAddL mem incr));
 6229 
 6230   ins_cost(ALU_COST);
 6231 
 6232   format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addL" %}
 6233 
 6234   ins_encode %{
 6235     __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6236   %}
 6237 
 6238   ins_pipe(pipe_serial);
 6239 %}
 6240 
 6241 instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr)
 6242 %{
 6243   predicate(n->as_LoadStore()->result_not_used());
 6244 
 6245   match(Set dummy (GetAndAddL mem incr));
 6246 
 6247   ins_cost(ALU_COST);
 6248 
 6249   format %{ "get_and_addL [$mem], $incr\t#@get_and_addL_no_res" %}
 6250 
 6251   ins_encode %{
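    // The fetched value is unused, so noreg is passed as the destination.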
 6252     __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
 6253   %}
 6254 
 6255   ins_pipe(pipe_serial);
 6256 %}
 6257 
 6258 instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAdd incr)
 6259 %{
 6260   match(Set newval (GetAndAddL mem incr));
 6261 
 6262   ins_cost(ALU_COST);
 6263 
 6264   format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addLi" %}
 6265 
 6266   ins_encode %{
 6267     __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6268   %}
 6269 
 6270   ins_pipe(pipe_serial);
 6271 %}
 6272 
 6273 instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAdd incr)
 6274 %{
 6275   predicate(n->as_LoadStore()->result_not_used());
 6276 
 6277   match(Set dummy (GetAndAddL mem incr));
 6278 
 6279   ins_cost(ALU_COST);
 6280 
 6281   format %{ "get_and_addL [$mem], $incr\t#@get_and_addLi_no_res" %}
 6282 
 6283   ins_encode %{
 6284     __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
 6285   %}
 6286 
 6287   ins_pipe(pipe_serial);
 6288 %}
 6289 
 6290 instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr)
 6291 %{
 6292   match(Set newval (GetAndAddI mem incr));
 6293 
 6294   ins_cost(ALU_COST);
 6295 
 6296   format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addI" %}
 6297 
 6298   ins_encode %{
 6299     __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6300   %}
 6301 
 6302   ins_pipe(pipe_serial);
 6303 %}
 6304 
 6305 instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr)
 6306 %{
 6307   predicate(n->as_LoadStore()->result_not_used());
 6308 
 6309   match(Set dummy (GetAndAddI mem incr));
 6310 
 6311   ins_cost(ALU_COST);
 6312 
 6313   format %{ "get_and_addI [$mem], $incr\t#@get_and_addI_no_res" %}
 6314 
 6315   ins_encode %{
 6316     __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
 6317   %}
 6318 
 6319   ins_pipe(pipe_serial);
 6320 %}
 6321 
 6322 instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAdd incr)
 6323 %{
 6324   match(Set newval (GetAndAddI mem incr));
 6325 
 6326   ins_cost(ALU_COST);
 6327 
 6328   format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addIi" %}
 6329 
 6330   ins_encode %{
 6331     __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6332   %}
 6333 
 6334   ins_pipe(pipe_serial);
 6335 %}
 6336 
 6337 instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAdd incr)
 6338 %{
 6339   predicate(n->as_LoadStore()->result_not_used());
 6340 
 6341   match(Set dummy (GetAndAddI mem incr));
 6342 
 6343   ins_cost(ALU_COST);
 6344 
 6345   format %{ "get_and_addI [$mem], $incr\t#@get_and_addIi_no_res" %}
 6346 
 6347   ins_encode %{
 6348     __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
 6349   %}
 6350 
 6351   ins_pipe(pipe_serial);
 6352 %}
 6353 
 6354 instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr)
 6355 %{
 6356   predicate(needs_acquiring_load_reserved(n));
 6357 
 6358   match(Set newval (GetAndAddL mem incr));
 6359 
 6360   ins_cost(ALU_COST);
 6361 
 6362   format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLAcq" %}
 6363 
 6364   ins_encode %{
 6365     __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6366   %}
 6367 
 6368   ins_pipe(pipe_serial);
 6369 %}
 6370 
 6371 instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
 6372   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6373 
 6374   match(Set dummy (GetAndAddL mem incr));
 6375 
 6376   ins_cost(ALU_COST);
 6377 
 6378   format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addL_no_resAcq" %}
 6379 
 6380   ins_encode %{
 6381     __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
 6382   %}
 6383 
 6384   ins_pipe(pipe_serial);
 6385 %}
 6386 
 6387 instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAdd incr)
 6388 %{
 6389   predicate(needs_acquiring_load_reserved(n));
 6390 
 6391   match(Set newval (GetAndAddL mem incr));
 6392 
 6393   ins_cost(ALU_COST);
 6394 
 6395   format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLiAcq" %}
 6396 
 6397   ins_encode %{
 6398     __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6399   %}
 6400 
 6401   ins_pipe(pipe_serial);
 6402 %}
 6403 
 6404 instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAdd incr)
 6405 %{
 6406   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6407 
 6408   match(Set dummy (GetAndAddL mem incr));
 6409 
 6410   ins_cost(ALU_COST);
 6411 
 6412   format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addLi_no_resAcq" %}
 6413 
 6414   ins_encode %{
 6415     __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
 6416   %}
 6417 
 6418   ins_pipe(pipe_serial);
 6419 %}
 6420 
 6421 instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr)
 6422 %{
 6423   predicate(needs_acquiring_load_reserved(n));
 6424 
 6425   match(Set newval (GetAndAddI mem incr));
 6426 
 6427   ins_cost(ALU_COST);
 6428 
 6429   format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIAcq" %}
 6430 
 6431   ins_encode %{
 6432     __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6433   %}
 6434 
 6435   ins_pipe(pipe_serial);
 6436 %}
 6437 
 6438 instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr)
 6439 %{
 6440   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6441 
 6442   match(Set dummy (GetAndAddI mem incr));
 6443 
 6444   ins_cost(ALU_COST);
 6445 
 6446   format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addI_no_resAcq" %}
 6447 
 6448   ins_encode %{
 6449     __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
 6450   %}
 6451 
 6452   ins_pipe(pipe_serial);
 6453 %}
 6454 
 6455 instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAdd incr)
 6456 %{
 6457   predicate(needs_acquiring_load_reserved(n));
 6458 
 6459   match(Set newval (GetAndAddI mem incr));
 6460 
 6461   ins_cost(ALU_COST);
 6462 
 6463   format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIiAcq" %}
 6464 
 6465   ins_encode %{
 6466     __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6467   %}
 6468 
 6469   ins_pipe(pipe_serial);
 6470 %}
 6471 
 6472 instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAdd incr)
 6473 %{
 6474   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6475 
 6476   match(Set dummy (GetAndAddI mem incr));
 6477 
 6478   ins_cost(ALU_COST);
 6479 
 6480   format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addIi_no_resAcq" %}
 6481 
 6482   ins_encode %{
 6483     __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
 6484   %}
 6485 
 6486   ins_pipe(pipe_serial);
 6487 %}
 6488 
 6489 // ============================================================================
 6490 // Arithmetic Instructions
 6491 //
 6492 
 6493 // Integer Addition
 6494 
// TODO
// These instructions currently use operations that do not set CR and hence
// are not flagged as killing CR, but we would like to isolate the cases
// where we want to set flags from those where we don't. We still need to
// work out how to do that.
 6500 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6501   match(Set dst (AddI src1 src2));
 6502 
 6503   ins_cost(ALU_COST);
 6504   format %{ "addw  $dst, $src1, $src2\t#@addI_reg_reg" %}
 6505 
 6506   ins_encode %{
 6507     __ addw(as_Register($dst$$reg),
 6508             as_Register($src1$$reg),
 6509             as_Register($src2$$reg));
 6510   %}
 6511 
 6512   ins_pipe(ialu_reg_reg);
 6513 %}
 6514 
 6515 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAdd src2) %{
 6516   match(Set dst (AddI src1 src2));
 6517 
 6518   ins_cost(ALU_COST);
 6519   format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm" %}
 6520 
 6521   ins_encode %{
 6523     __ addiw(as_Register($dst$$reg),
 6524              as_Register($src1$$reg),
 6525              $src2$$constant);
 6526   %}
 6527 
 6528   ins_pipe(ialu_reg_imm);
 6529 %}
 6530 
 6531 instruct addI_reg_imm_l2i(iRegINoSp dst, iRegL src1, immIAdd src2) %{
 6532   match(Set dst (AddI (ConvL2I src1) src2));
 6533 
 6534   ins_cost(ALU_COST);
 6535   format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm_l2i" %}
 6536 
 6537   ins_encode %{
 6538     __ addiw(as_Register($dst$$reg),
 6539              as_Register($src1$$reg),
 6540              $src2$$constant);
 6541   %}
 6542 
 6543   ins_pipe(ialu_reg_imm);
 6544 %}
 6545 
 6546 // Pointer Addition
 6547 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
 6548   match(Set dst (AddP src1 src2));
 6549 
 6550   ins_cost(ALU_COST);
 6551   format %{ "add $dst, $src1, $src2\t# ptr, #@addP_reg_reg" %}
 6552 
 6553   ins_encode %{
 6554     __ add(as_Register($dst$$reg),
 6555            as_Register($src1$$reg),
 6556            as_Register($src2$$reg));
 6557   %}
 6558 
 6559   ins_pipe(ialu_reg_reg);
 6560 %}
 6561 
 6562 // If we shift more than 32 bits, we need not convert I2L.
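      // For example, if the low 32 bits of src are 0xcafebabe and scale is 32,
      // both ((long)(int)src) << 32 and a 64-bit shift of the raw register
      // yield 0xcafebabe00000000: every bit the sign-extension could change
      // is shifted out.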
 6563 instruct lShiftL_regI_immGE32(iRegLNoSp dst, iRegI src, uimmI6_ge32 scale) %{
 6564   match(Set dst (LShiftL (ConvI2L src) scale));
 6565   ins_cost(ALU_COST);
 6566   format %{ "slli  $dst, $src, $scale & 63\t#@lShiftL_regI_immGE32" %}
 6567 
 6568   ins_encode %{
 6569     __ slli(as_Register($dst$$reg), as_Register($src$$reg), $scale$$constant & 63);
 6570   %}
 6571 
 6572   ins_pipe(ialu_reg_shift);
 6573 %}
 6574 
 6575 // Pointer Immediate Addition
 6576 // n.b. this needs to be more expensive than using an indirect memory
 6577 // operand
 6578 instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAdd src2) %{
 6579   match(Set dst (AddP src1 src2));
 6580   ins_cost(ALU_COST);
 6581   format %{ "addi  $dst, $src1, $src2\t# ptr, #@addP_reg_imm" %}
 6582 
 6583   ins_encode %{
 6584     // src2 is an immediate, so the macro assembler's add() emits an addi
 6585     __ add(as_Register($dst$$reg),
 6586            as_Register($src1$$reg),
 6587            $src2$$constant);
 6588   %}
 6589 
 6590   ins_pipe(ialu_reg_imm);
 6591 %}
 6592 
 6593 // Long Addition
 6594 instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6595   match(Set dst (AddL src1 src2));
 6596   ins_cost(ALU_COST);
 6597   format %{ "add  $dst, $src1, $src2\t#@addL_reg_reg" %}
 6598 
 6599   ins_encode %{
 6600     __ add(as_Register($dst$$reg),
 6601            as_Register($src1$$reg),
 6602            as_Register($src2$$reg));
 6603   %}
 6604 
 6605   ins_pipe(ialu_reg_reg);
 6606 %}
 6607 
 6608 // Long Immediate Addition. No constant pool entries required.
 6609 instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 6610   match(Set dst (AddL src1 src2));
 6611   ins_cost(ALU_COST);
 6612   format %{ "addi  $dst, $src1, $src2\t#@addL_reg_imm" %}
 6613 
 6614   ins_encode %{
 6615     // src2 is an immediate, so the macro assembler's add() emits an addi
 6616     __ add(as_Register($dst$$reg),
 6617            as_Register($src1$$reg),
 6618            $src2$$constant);
 6619   %}
 6620 
 6621   ins_pipe(ialu_reg_imm);
 6622 %}
 6623 
 6624 // Integer Subtraction
 6625 instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6626   match(Set dst (SubI src1 src2));
 6627 
 6628   ins_cost(ALU_COST);
 6629   format %{ "subw  $dst, $src1, $src2\t#@subI_reg_reg" %}
 6630 
 6631   ins_encode %{
 6632     __ subw(as_Register($dst$$reg),
 6633             as_Register($src1$$reg),
 6634             as_Register($src2$$reg));
 6635   %}
 6636 
 6637   ins_pipe(ialu_reg_reg);
 6638 %}
 6639 
 6640 // Immediate Subtraction
 6641 instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immISub src2) %{
 6642   match(Set dst (SubI src1 src2));
 6643 
 6644   ins_cost(ALU_COST);
 6645   format %{ "addiw  $dst, $src1, -$src2\t#@subI_reg_imm" %}
 6646 
 6647   ins_encode %{
 6648     // src2 is an immediate, so the macro assembler's subw() emits an addiw with -src2
 6649     __ subw(as_Register($dst$$reg),
 6650             as_Register($src1$$reg),
 6651             $src2$$constant);
 6652   %}
 6653 
 6654   ins_pipe(ialu_reg_imm);
 6655 %}
 6656 
 6657 // Long Subtraction
 6658 instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6659   match(Set dst (SubL src1 src2));
 6660   ins_cost(ALU_COST);
 6661   format %{ "sub  $dst, $src1, $src2\t#@subL_reg_reg" %}
 6662 
 6663   ins_encode %{
 6664     __ sub(as_Register($dst$$reg),
 6665            as_Register($src1$$reg),
 6666            as_Register($src2$$reg));
 6667   %}
 6668 
 6669   ins_pipe(ialu_reg_reg);
 6670 %}
 6671 
 6672 // Long Immediate Subtraction. No constant pool entries required.
 6673 instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLSub src2) %{
 6674   match(Set dst (SubL src1 src2));
 6675   ins_cost(ALU_COST);
 6676   format %{ "addi  $dst, $src1, -$src2\t#@subL_reg_imm" %}
 6677 
 6678   ins_encode %{
 6679     // src2 is an immediate, so the macro assembler's sub() emits an addi with -src2
 6680     __ sub(as_Register($dst$$reg),
 6681            as_Register($src1$$reg),
 6682            $src2$$constant);
 6683   %}
 6684 
 6685   ins_pipe(ialu_reg_imm);
 6686 %}
 6687 
 6688 // Integer Negation (special case for sub)
 6689 
 6690 instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
 6691   match(Set dst (SubI zero src));
 6692   ins_cost(ALU_COST);
 6693   format %{ "subw  $dst, x0, $src\t# int, #@negI_reg" %}
 6694 
 6695   ins_encode %{
 6696     // negw is an alias for subw with x0 as the first source operand
 6697     __ negw(as_Register($dst$$reg),
 6698             as_Register($src$$reg));
 6699   %}
 6700 
 6701   ins_pipe(ialu_reg);
 6702 %}
 6703 
 6704 // Long Negation
 6705 
 6706 instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero) %{
 6707   match(Set dst (SubL zero src));
 6708   ins_cost(ALU_COST);
 6709   format %{ "sub  $dst, x0, $src\t# long, #@negL_reg" %}
 6710 
 6711   ins_encode %{
 6712     // neg is an alias for sub with x0 as the first source operand
 6713     __ neg(as_Register($dst$$reg),
 6714            as_Register($src$$reg));
 6715   %}
 6716 
 6717   ins_pipe(ialu_reg);
 6718 %}
 6719 
 6720 // Integer Multiply
 6721 
 6722 instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6723   match(Set dst (MulI src1 src2));
 6724   ins_cost(IMUL_COST);
 6725   format %{ "mulw  $dst, $src1, $src2\t#@mulI" %}
 6726 
 6728   ins_encode %{
 6729     // mulw sign-extends the 32-bit product to 64 bits in dst
 6730     __ mulw(as_Register($dst$$reg),
 6731             as_Register($src1$$reg),
 6732             as_Register($src2$$reg));
 6733   %}
 6734 
 6735   ins_pipe(imul_reg_reg);
 6736 %}
 6737 
 6738 // Long Multiply
 6739 
 6740 instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6741   match(Set dst (MulL src1 src2));
 6742   ins_cost(IMUL_COST);
 6743   format %{ "mul  $dst, $src1, $src2\t#@mulL" %}
 6744 
 6745   ins_encode %{
 6746     __ mul(as_Register($dst$$reg),
 6747            as_Register($src1$$reg),
 6748            as_Register($src2$$reg));
 6749   %}
 6750 
 6751   ins_pipe(lmul_reg_reg);
 6752 %}
 6753 
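      // Long Multiply High
      // mulh/mulhu return the upper 64 bits of the full 128-bit signed/unsigned
      // product of the two 64-bit operands.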
 6754 instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
 6755 %{
 6756   match(Set dst (MulHiL src1 src2));
 6757   ins_cost(IMUL_COST);
 6758   format %{ "mulh  $dst, $src1, $src2\t# mulhi, #@mulHiL_rReg" %}
 6759 
 6760   ins_encode %{
 6761     __ mulh(as_Register($dst$$reg),
 6762             as_Register($src1$$reg),
 6763             as_Register($src2$$reg));
 6764   %}
 6765 
 6766   ins_pipe(lmul_reg_reg);
 6767 %}
 6768 
 6769 instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
 6770 %{
 6771   match(Set dst (UMulHiL src1 src2));
 6772   ins_cost(IMUL_COST);
 6773   format %{ "mulhu  $dst, $src1, $src2\t# umulhi, #@umulHiL_rReg" %}
 6774 
 6775   ins_encode %{
 6776     __ mulhu(as_Register($dst$$reg),
 6777              as_Register($src1$$reg),
 6778              as_Register($src2$$reg));
 6779   %}
 6780 
 6781   ins_pipe(lmul_reg_reg);
 6782 %}
 6783 
 6784 // Integer Divide
 6785 
 6786 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6787   match(Set dst (DivI src1 src2));
 6788   ins_cost(IDIVSI_COST);
 6789   format %{ "divw  $dst, $src1, $src2\t#@divI" %}
 6790 
 6791   ins_encode(riscv_enc_divw(dst, src1, src2));
 6792   ins_pipe(idiv_reg_reg);
 6793 %}
 6794 
 6795 instruct UdivI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6796   match(Set dst (UDivI src1 src2));
 6797   ins_cost(IDIVSI_COST);
 6798   format %{ "divuw  $dst, $src1, $src2\t#@UdivI" %}
 6799 
 6800   ins_encode(riscv_enc_divuw(dst, src1, src2));
 6801   ins_pipe(idiv_reg_reg);
 6802 %}
 6803 
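      // (src >> 31) >>> 31 just extracts the sign bit, so the two shifts
      // collapse into a single logical right shift by 31.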
 6804 instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
 6805   match(Set dst (URShiftI (RShiftI src1 div1) div2));
 6806   ins_cost(ALU_COST);
 6807   format %{ "srliw $dst, $src1, $div1\t# int signExtract, #@signExtract" %}
 6808 
 6809   ins_encode %{
 6810     __ srliw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
 6811   %}
 6812   ins_pipe(ialu_reg_shift);
 6813 %}
 6814 
 6815 // Long Divide
 6816 
 6817 instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6818   match(Set dst (DivL src1 src2));
 6819   ins_cost(IDIVDI_COST);
 6820   format %{ "div  $dst, $src1, $src2\t#@divL" %}
 6821 
 6822   ins_encode(riscv_enc_div(dst, src1, src2));
 6823   ins_pipe(ldiv_reg_reg);
 6824 %}
 6825 
 6826 instruct UdivL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6827   match(Set dst (UDivL src1 src2));
 6828   ins_cost(IDIVDI_COST);
 6829 
 6830   format %{ "divu $dst, $src1, $src2\t#@UdivL" %}
 6831 
 6832   ins_encode(riscv_enc_divu(dst, src1, src2));
 6833   ins_pipe(ldiv_reg_reg);
 6834 %}
 6835 
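      // Likewise, (src >> 63) >>> 63 extracts the sign bit of a long and
      // collapses into a single srli by 63.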
 6836 instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
 6837   match(Set dst (URShiftL (RShiftL src1 div1) div2));
 6838   ins_cost(ALU_COST);
 6839   format %{ "srli $dst, $src1, $div1\t# long signExtract, #@signExtractL" %}
 6840 
 6841   ins_encode %{
 6842     __ srli(as_Register($dst$$reg), as_Register($src1$$reg), 63);
 6843   %}
 6844   ins_pipe(ialu_reg_shift);
 6845 %}
 6846 
 6847 // Integer Remainder
 6848 
 6849 instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6850   match(Set dst (ModI src1 src2));
 6851   ins_cost(IDIVSI_COST);
 6852   format %{ "remw  $dst, $src1, $src2\t#@modI" %}
 6853 
 6854   ins_encode(riscv_enc_modw(dst, src1, src2));
 6855   ins_pipe(ialu_reg_reg);
 6856 %}
 6857 
 6858 instruct UmodI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6859   match(Set dst (UModI src1 src2));
 6860   ins_cost(IDIVSI_COST);
 6861   format %{ "remuw  $dst, $src1, $src2\t#@UmodI" %}
 6862 
 6863   ins_encode(riscv_enc_moduw(dst, src1, src2));
 6864   ins_pipe(ialu_reg_reg);
 6865 %}
 6866 
 6867 // Long Remainder
 6868 
 6869 instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6870   match(Set dst (ModL src1 src2));
 6871   ins_cost(IDIVDI_COST);
 6872   format %{ "rem  $dst, $src1, $src2\t#@modL" %}
 6873 
 6874   ins_encode(riscv_enc_mod(dst, src1, src2));
 6875   ins_pipe(ialu_reg_reg);
 6876 %}
 6877 
 6878 instruct UmodL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6879   match(Set dst (UModL src1 src2));
 6880   ins_cost(IDIVDI_COST);
 6881   format %{ "remu  $dst, $src1, $src2\t#@UmodL" %}
 6882 
 6883   ins_encode(riscv_enc_modu(dst, src1, src2));
 6884   ins_pipe(ialu_reg_reg);
 6885 %}
 6886 
 6887 // Integer Shifts
 6888 
 6889 // Shift Left Register
 6890 // In RV64I, only the low 5 bits of src2 are considered for the shift amount
 6891 instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6892   match(Set dst (LShiftI src1 src2));
 6893   ins_cost(ALU_COST);
 6894   format %{ "sllw  $dst, $src1, $src2\t#@lShiftI_reg_reg" %}
 6895 
 6896   ins_encode %{
 6897     __ sllw(as_Register($dst$$reg),
 6898             as_Register($src1$$reg),
 6899             as_Register($src2$$reg));
 6900   %}
 6901 
 6902   ins_pipe(ialu_reg_reg_vshift);
 6903 %}
 6904 
 6905 // Shift Left Immediate
 6906 instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6907   match(Set dst (LShiftI src1 src2));
 6908   ins_cost(ALU_COST);
 6909   format %{ "slliw  $dst, $src1, ($src2 & 0x1f)\t#@lShiftI_reg_imm" %}
 6910 
 6911   ins_encode %{
 6912     // for 32-bit shifts the shift amount is encoded in the lower
 6913     // 5 bits of the I-immediate field in RV64I
 6914     __ slliw(as_Register($dst$$reg),
 6915              as_Register($src1$$reg),
 6916              (unsigned) $src2$$constant & 0x1f);
 6917   %}
 6918 
 6919   ins_pipe(ialu_reg_shift);
 6920 %}
 6921 
 6922 // Shift Right Logical Register
 6923 // In RV64I, only the low 5 bits of src2 are considered for the shift amount
 6924 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6925   match(Set dst (URShiftI src1 src2));
 6926   ins_cost(ALU_COST);
 6927   format %{ "srlw  $dst, $src1, $src2\t#@urShiftI_reg_reg" %}
 6928 
 6929   ins_encode %{
 6930     __ srlw(as_Register($dst$$reg),
 6931             as_Register($src1$$reg),
 6932             as_Register($src2$$reg));
 6933   %}
 6934 
 6935   ins_pipe(ialu_reg_reg_vshift);
 6936 %}
 6937 
 6938 // Shift Right Logical Immediate
 6939 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6940   match(Set dst (URShiftI src1 src2));
 6941   ins_cost(ALU_COST);
 6942   format %{ "srliw  $dst, $src1, ($src2 & 0x1f)\t#@urShiftI_reg_imm" %}
 6943 
 6944   ins_encode %{
 6945     // for 32-bit shifts the shift amount is encoded in the lower
 6946     // 5 bits of the I-immediate field in RV64I
 6947     __ srliw(as_Register($dst$$reg),
 6948              as_Register($src1$$reg),
 6949              (unsigned) $src2$$constant & 0x1f);
 6950   %}
 6951 
 6952   ins_pipe(ialu_reg_shift);
 6953 %}
 6954 
 6955 // Shift Right Arithmetic Register
 6956 // In RV64I, only the low 5 bits of src2 are considered for the shift amount
 6957 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6958   match(Set dst (RShiftI src1 src2));
 6959   ins_cost(ALU_COST);
 6960   format %{ "sraw  $dst, $src1, $src2\t#@rShiftI_reg_reg" %}
 6961 
 6962   ins_encode %{
 6963     // sraw sign-extends the 32-bit result into the upper 32 bits of dst
 6964     __ sraw(as_Register($dst$$reg),
 6965             as_Register($src1$$reg),
 6966             as_Register($src2$$reg));
 6967   %}
 6968 
 6969   ins_pipe(ialu_reg_reg_vshift);
 6970 %}
 6971 
 6972 // Shift Right Arithmetic Immediate
 6973 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6974   match(Set dst (RShiftI src1 src2));
 6975   ins_cost(ALU_COST);
 6976   format %{ "sraiw  $dst, $src1, ($src2 & 0x1f)\t#@rShiftI_reg_imm" %}
 6977 
 6978   ins_encode %{
 6979     // sraiw sign-extends the 32-bit result into the upper 32 bits of dst
 6980     __ sraiw(as_Register($dst$$reg),
 6981              as_Register($src1$$reg),
 6982              (unsigned) $src2$$constant & 0x1f);
 6983   %}
 6984 
 6985   ins_pipe(ialu_reg_shift);
 6986 %}
 6987 
 6988 // Long Shifts
 6989 
 6990 // Shift Left Register
 6991 // In RV64I, only the low 6 bits of src2 are considered for the shift amount
 6992 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 6993   match(Set dst (LShiftL src1 src2));
 6994 
 6995   ins_cost(ALU_COST);
 6996   format %{ "sll  $dst, $src1, $src2\t#@lShiftL_reg_reg" %}
 6997 
 6998   ins_encode %{
 6999     __ sll(as_Register($dst$$reg),
 7000            as_Register($src1$$reg),
 7001            as_Register($src2$$reg));
 7002   %}
 7003 
 7004   ins_pipe(ialu_reg_reg_vshift);
 7005 %}
 7006 
 7007 // Shift Left Immediate
 7008 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 7009   match(Set dst (LShiftL src1 src2));
 7010 
 7011   ins_cost(ALU_COST);
 7012   format %{ "slli  $dst, $src1, ($src2 & 0x3f)\t#@lShiftL_reg_imm" %}
 7013 
 7014   ins_encode %{
 7015     // the shift amount is encoded in the lower
 7016     // 6 bits of the I-immediate field for RV64I
 7017     __ slli(as_Register($dst$$reg),
 7018             as_Register($src1$$reg),
 7019             (unsigned) $src2$$constant & 0x3f);
 7020   %}
 7021 
 7022   ins_pipe(ialu_reg_shift);
 7023 %}
 7024 
 7025 // Shift Right Logical Register
 7026 // In RV64I, only the low 6 bits of src2 are considered for the shift amount
 7027 instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 7028   match(Set dst (URShiftL src1 src2));
 7029 
 7030   ins_cost(ALU_COST);
 7031   format %{ "srl  $dst, $src1, $src2\t#@urShiftL_reg_reg" %}
 7032 
 7033   ins_encode %{
 7034     __ srl(as_Register($dst$$reg),
 7035            as_Register($src1$$reg),
 7036            as_Register($src2$$reg));
 7037   %}
 7038 
 7039   ins_pipe(ialu_reg_reg_vshift);
 7040 %}
 7041 
 7042 // Shift Right Logical Immediate
 7043 instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 7044   match(Set dst (URShiftL src1 src2));
 7045 
 7046   ins_cost(ALU_COST);
 7047   format %{ "srli  $dst, $src1, ($src2 & 0x3f)\t#@urShiftL_reg_imm" %}
 7048 
 7049   ins_encode %{
 7050     // the shift amount is encoded in the lower
 7051     // 6 bits of the I-immediate field for RV64I
 7052     __ srli(as_Register($dst$$reg),
 7053             as_Register($src1$$reg),
 7054             (unsigned) $src2$$constant & 0x3f);
 7055   %}
 7056 
 7057   ins_pipe(ialu_reg_shift);
 7058 %}
 7059 
 7060 // A special-case pattern for card table stores.
 7061 instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
 7062   match(Set dst (URShiftL (CastP2X src1) src2));
 7063 
 7064   ins_cost(ALU_COST);
 7065   format %{ "srli  $dst, p2x($src1), ($src2 & 0x3f)\t#@urShiftP_reg_imm" %}
 7066 
 7067   ins_encode %{
 7068     // the shift amount is encoded in the lower
 7069     // 6 bits of the I-immediate field for RV64I
 7070     __ srli(as_Register($dst$$reg),
 7071             as_Register($src1$$reg),
 7072             (unsigned) $src2$$constant & 0x3f);
 7073   %}
 7074 
 7075   ins_pipe(ialu_reg_shift);
 7076 %}
 7077 
 7078 // Shift Right Arithmetic Register
 7079 // In RV64I, only the low 6 bits of src2 are considered for the shift amount
 7080 instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 7081   match(Set dst (RShiftL src1 src2));
 7082 
 7083   ins_cost(ALU_COST);
 7084   format %{ "sra  $dst, $src1, $src2\t#@rShiftL_reg_reg" %}
 7085 
 7086   ins_encode %{
 7087     __ sra(as_Register($dst$$reg),
 7088            as_Register($src1$$reg),
 7089            as_Register($src2$$reg));
 7090   %}
 7091 
 7092   ins_pipe(ialu_reg_reg_vshift);
 7093 %}
 7094 
 7095 // Shift Right Arithmetic Immediate
 7096 instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 7097   match(Set dst (RShiftL src1 src2));
 7098 
 7099   ins_cost(ALU_COST);
 7100   format %{ "srai  $dst, $src1, ($src2 & 0x3f)\t#@rShiftL_reg_imm" %}
 7101 
 7102   ins_encode %{
 7103     // the shift amount is encoded in the lower
 7104     // 6 bits of the I-immediate field for RV64I
 7105     __ srai(as_Register($dst$$reg),
 7106             as_Register($src1$$reg),
 7107             (unsigned) $src2$$constant & 0x3f);
 7108   %}
 7109 
 7110   ins_pipe(ialu_reg_shift);
 7111 %}
 7112 
 7113 instruct regI_not_reg(iRegINoSp dst, iRegI src1, immI_M1 m1) %{
 7114   match(Set dst (XorI src1 m1));
 7115   ins_cost(ALU_COST);
 7116   format %{ "xori  $dst, $src1, -1\t#@regI_not_reg" %}
 7117 
 7118   ins_encode %{
 7119     __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
 7120   %}
 7121 
 7122   ins_pipe(ialu_reg_imm);
 7123 %}
 7124 
 7125 instruct regL_not_reg(iRegLNoSp dst, iRegL src1, immL_M1 m1) %{
 7126   match(Set dst (XorL src1 m1));
 7127   ins_cost(ALU_COST);
 7128   format %{ "xori  $dst, $src1, -1\t#@regL_not_reg" %}
 7129 
 7130   ins_encode %{
 7131     __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
 7132   %}
 7133 
 7134   ins_pipe(ialu_reg_imm);
 7135 %}
 7136 
 7137 
 7138 // ============================================================================
 7139 // Floating Point Arithmetic Instructions
 7140 
 7141 instruct addF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7142   match(Set dst (AddF src1 src2));
 7143 
 7144   ins_cost(DEFAULT_COST * 5);
 7145   format %{ "fadd.s  $dst, $src1, $src2\t#@addF_reg_reg" %}
 7146 
 7147   ins_encode %{
 7148     __ fadd_s(as_FloatRegister($dst$$reg),
 7149               as_FloatRegister($src1$$reg),
 7150               as_FloatRegister($src2$$reg));
 7151   %}
 7152 
 7153   ins_pipe(fp_dop_reg_reg_s);
 7154 %}
 7155 
 7156 instruct addD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7157   match(Set dst (AddD src1 src2));
 7158 
 7159   ins_cost(DEFAULT_COST * 5);
 7160   format %{ "fadd.d  $dst, $src1, $src2\t#@addD_reg_reg" %}
 7161 
 7162   ins_encode %{
 7163     __ fadd_d(as_FloatRegister($dst$$reg),
 7164               as_FloatRegister($src1$$reg),
 7165               as_FloatRegister($src2$$reg));
 7166   %}
 7167 
 7168   ins_pipe(fp_dop_reg_reg_d);
 7169 %}
 7170 
 7171 instruct subF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7172   match(Set dst (SubF src1 src2));
 7173 
 7174   ins_cost(DEFAULT_COST * 5);
 7175   format %{ "fsub.s  $dst, $src1, $src2\t#@subF_reg_reg" %}
 7176 
 7177   ins_encode %{
 7178     __ fsub_s(as_FloatRegister($dst$$reg),
 7179               as_FloatRegister($src1$$reg),
 7180               as_FloatRegister($src2$$reg));
 7181   %}
 7182 
 7183   ins_pipe(fp_dop_reg_reg_s);
 7184 %}
 7185 
 7186 instruct subD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7187   match(Set dst (SubD src1 src2));
 7188 
 7189   ins_cost(DEFAULT_COST * 5);
 7190   format %{ "fsub.d  $dst, $src1, $src2\t#@subD_reg_reg" %}
 7191 
 7192   ins_encode %{
 7193     __ fsub_d(as_FloatRegister($dst$$reg),
 7194               as_FloatRegister($src1$$reg),
 7195               as_FloatRegister($src2$$reg));
 7196   %}
 7197 
 7198   ins_pipe(fp_dop_reg_reg_d);
 7199 %}
 7200 
 7201 instruct mulF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7202   match(Set dst (MulF src1 src2));
 7203 
 7204   ins_cost(FMUL_SINGLE_COST);
 7205   format %{ "fmul.s  $dst, $src1, $src2\t#@mulF_reg_reg" %}
 7206 
 7207   ins_encode %{
 7208     __ fmul_s(as_FloatRegister($dst$$reg),
 7209               as_FloatRegister($src1$$reg),
 7210               as_FloatRegister($src2$$reg));
 7211   %}
 7212 
 7213   ins_pipe(fp_dop_reg_reg_s);
 7214 %}
 7215 
 7216 instruct mulD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7217   match(Set dst (MulD src1 src2));
 7218 
 7219   ins_cost(FMUL_DOUBLE_COST);
 7220   format %{ "fmul.d  $dst, $src1, $src2\t#@mulD_reg_reg" %}
 7221 
 7222   ins_encode %{
 7223     __ fmul_d(as_FloatRegister($dst$$reg),
 7224               as_FloatRegister($src1$$reg),
 7225               as_FloatRegister($src2$$reg));
 7226   %}
 7227 
 7228   ins_pipe(fp_dop_reg_reg_d);
 7229 %}
 7230 
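      // The FmaF/FmaD patterns below map onto the RISC-V fused multiply-add
      // family (fmadd/fmsub/fnmsub/fnmadd), which performs a single rounding.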
 7231 // src1 * src2 + src3
 7232 instruct maddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7233   match(Set dst (FmaF src3 (Binary src1 src2)));
 7234 
 7235   ins_cost(FMUL_SINGLE_COST);
 7236   format %{ "fmadd.s  $dst, $src1, $src2, $src3\t#@maddF_reg_reg" %}
 7237 
 7238   ins_encode %{
 7239     assert(UseFMA, "Needs FMA instructions support.");
 7240     __ fmadd_s(as_FloatRegister($dst$$reg),
 7241                as_FloatRegister($src1$$reg),
 7242                as_FloatRegister($src2$$reg),
 7243                as_FloatRegister($src3$$reg));
 7244   %}
 7245 
 7246   ins_pipe(pipe_class_default);
 7247 %}
 7248 
 7249 // src1 * src2 + src3
 7250 instruct maddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7251   match(Set dst (FmaD src3 (Binary src1 src2)));
 7252 
 7253   ins_cost(FMUL_DOUBLE_COST);
 7254   format %{ "fmadd.d  $dst, $src1, $src2, $src3\t#@maddD_reg_reg" %}
 7255 
 7256   ins_encode %{
 7257     assert(UseFMA, "Needs FMA instructions support.");
 7258     __ fmadd_d(as_FloatRegister($dst$$reg),
 7259                as_FloatRegister($src1$$reg),
 7260                as_FloatRegister($src2$$reg),
 7261                as_FloatRegister($src3$$reg));
 7262   %}
 7263 
 7264   ins_pipe(pipe_class_default);
 7265 %}
 7266 
 7267 // src1 * src2 - src3
 7268 instruct msubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7269   match(Set dst (FmaF (NegF src3) (Binary src1 src2)));
 7270 
 7271   ins_cost(FMUL_SINGLE_COST);
 7272   format %{ "fmsub.s  $dst, $src1, $src2, $src3\t#@msubF_reg_reg" %}
 7273 
 7274   ins_encode %{
 7275     assert(UseFMA, "Needs FMA instructions support.");
 7276     __ fmsub_s(as_FloatRegister($dst$$reg),
 7277                as_FloatRegister($src1$$reg),
 7278                as_FloatRegister($src2$$reg),
 7279                as_FloatRegister($src3$$reg));
 7280   %}
 7281 
 7282   ins_pipe(pipe_class_default);
 7283 %}
 7284 
 7285 // src1 * src2 - src3
 7286 instruct msubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7287   match(Set dst (FmaD (NegD src3) (Binary src1 src2)));
 7288 
 7289   ins_cost(FMUL_DOUBLE_COST);
 7290   format %{ "fmsub.d  $dst, $src1, $src2, $src3\t#@msubD_reg_reg" %}
 7291 
 7292   ins_encode %{
 7293     assert(UseFMA, "Needs FMA instructions support.");
 7294     __ fmsub_d(as_FloatRegister($dst$$reg),
 7295                as_FloatRegister($src1$$reg),
 7296                as_FloatRegister($src2$$reg),
 7297                as_FloatRegister($src3$$reg));
 7298   %}
 7299 
 7300   ins_pipe(pipe_class_default);
 7301 %}
 7302 
 7303 // src1 * (-src2) + src3
 7304 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
 7305 instruct nmsubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7306   match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
 7307 
 7308   ins_cost(FMUL_SINGLE_COST);
 7309   format %{ "fnmsub.s  $dst, $src1, $src2, $src3\t#@nmsubF_reg_reg" %}
 7310 
 7311   ins_encode %{
 7312     assert(UseFMA, "Needs FMA instructions support.");
 7313     __ fnmsub_s(as_FloatRegister($dst$$reg),
 7314                 as_FloatRegister($src1$$reg),
 7315                 as_FloatRegister($src2$$reg),
 7316                 as_FloatRegister($src3$$reg));
 7317   %}
 7318 
 7319   ins_pipe(pipe_class_default);
 7320 %}
 7321 
 7322 // src1 * (-src2) + src3
 7323 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
 7324 instruct nmsubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7325   match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
 7326 
 7327   ins_cost(FMUL_DOUBLE_COST);
 7328   format %{ "fnmsub.d  $dst, $src1, $src2, $src3\t#@nmsubD_reg_reg" %}
 7329 
 7330   ins_encode %{
 7331     assert(UseFMA, "Needs FMA instructions support.");
 7332     __ fnmsub_d(as_FloatRegister($dst$$reg),
 7333                 as_FloatRegister($src1$$reg),
 7334                 as_FloatRegister($src2$$reg),
 7335                 as_FloatRegister($src3$$reg));
 7336   %}
 7337 
 7338   ins_pipe(pipe_class_default);
 7339 %}
 7340 
 7341 // src1 * (-src2) - src3
 7342 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 7343 instruct nmaddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7344   match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
 7345 
 7346   ins_cost(FMUL_SINGLE_COST);
 7347   format %{ "fnmadd.s  $dst, $src1, $src2, $src3\t#@nmaddF_reg_reg" %}
 7348 
 7349   ins_encode %{
 7350     assert(UseFMA, "Needs FMA instructions support.");
 7351     __ fnmadd_s(as_FloatRegister($dst$$reg),
 7352                 as_FloatRegister($src1$$reg),
 7353                 as_FloatRegister($src2$$reg),
 7354                 as_FloatRegister($src3$$reg));
 7355   %}
 7356 
 7357   ins_pipe(pipe_class_default);
 7358 %}
 7359 
 7360 // src1 * (-src2) - src3
 7361 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 7362 instruct nmaddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7363   match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
 7364 
 7365   ins_cost(FMUL_DOUBLE_COST);
 7366   format %{ "fnmadd.d  $dst, $src1, $src2, $src3\t#@nmaddD_reg_reg" %}
 7367 
 7368   ins_encode %{
 7369     assert(UseFMA, "Needs FMA instructions support.");
 7370     __ fnmadd_d(as_FloatRegister($dst$$reg),
 7371                 as_FloatRegister($src1$$reg),
 7372                 as_FloatRegister($src2$$reg),
 7373                 as_FloatRegister($src3$$reg));
 7374   %}
 7375 
 7376   ins_pipe(pipe_class_default);
 7377 %}
 7378 
 7379 // Math.max(FF)F
 7380 instruct maxF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
 7381   match(Set dst (MaxF src1 src2));
 7382   effect(TEMP_DEF dst, KILL cr);
 7383 
 7384   format %{ "maxF $dst, $src1, $src2" %}
 7385 
 7386   ins_encode %{
 7387     __ minmax_fp(as_FloatRegister($dst$$reg),
 7388                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7389                  false /* is_double */, false /* is_min */);
 7390   %}
 7391 
 7392   ins_pipe(pipe_class_default);
 7393 %}
 7394 
 7395 // Math.min(FF)F
 7396 instruct minF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
 7397   match(Set dst (MinF src1 src2));
 7398   effect(TEMP_DEF dst, KILL cr);
 7399 
 7400   format %{ "minF $dst, $src1, $src2" %}
 7401 
 7402   ins_encode %{
 7403     __ minmax_fp(as_FloatRegister($dst$$reg),
 7404                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7405                  false /* is_double */, true /* is_min */);
 7406   %}
 7407 
 7408   ins_pipe(pipe_class_default);
 7409 %}
 7410 
 7411 // Math.max(DD)D
 7412 instruct maxD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
 7413   match(Set dst (MaxD src1 src2));
 7414   effect(TEMP_DEF dst, KILL cr);
 7415 
 7416   format %{ "maxD $dst, $src1, $src2" %}
 7417 
 7418   ins_encode %{
 7419     __ minmax_fp(as_FloatRegister($dst$$reg),
 7420                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7421                  true /* is_double */, false /* is_min */);
 7422   %}
 7423 
 7424   ins_pipe(pipe_class_default);
 7425 %}
 7426 
 7427 // Math.min(DD)D
 7428 instruct minD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
 7429   match(Set dst (MinD src1 src2));
 7430   effect(TEMP_DEF dst, KILL cr);
 7431 
 7432   format %{ "minD $dst, $src1, $src2" %}
 7433 
 7434   ins_encode %{
 7435     __ minmax_fp(as_FloatRegister($dst$$reg),
 7436                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7437                  true /* is_double */, true /* is_min */);
 7438   %}
 7439 
 7440   ins_pipe(pipe_class_default);
 7441 %}
 7442 
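      // Float/Double.isInfinite and isFinite are implemented with fclass, which
      // writes a one-hot class mask (separate bits for -inf, +inf, NaNs, etc.);
      // and-ing with the relevant mask bits and testing for non-zero gives the
      // boolean result.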
 7443 // Float.isInfinite
 7444 instruct isInfiniteF_reg_reg(iRegINoSp dst, fRegF src)
 7445 %{
 7446   match(Set dst (IsInfiniteF src));
 7447 
 7448   format %{ "isInfinite $dst, $src" %}
 7449   ins_encode %{
 7450     __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7451     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::fclass_mask::inf);
 7452     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7453   %}
 7454 
 7455   ins_pipe(pipe_class_default);
 7456 %}
 7457 
 7458 // Double.isInfinite
 7459 instruct isInfiniteD_reg_reg(iRegINoSp dst, fRegD src)
 7460 %{
 7461   match(Set dst (IsInfiniteD src));
 7462 
 7463   format %{ "isInfinite $dst, $src" %}
 7464   ins_encode %{
 7465     __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7466     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::fclass_mask::inf);
 7467     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7468   %}
 7469 
 7470   ins_pipe(pipe_class_default);
 7471 %}
 7472 
 7473 // Float.isFinite
 7474 instruct isFiniteF_reg_reg(iRegINoSp dst, fRegF src)
 7475 %{
 7476   match(Set dst (IsFiniteF src));
 7477 
 7478   format %{ "isFinite $dst, $src" %}
 7479   ins_encode %{
 7480     __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7481     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::fclass_mask::finite);
 7482     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7483   %}
 7484 
 7485   ins_pipe(pipe_class_default);
 7486 %}
 7487 
 7488 // Double.isFinite
 7489 instruct isFiniteD_reg_reg(iRegINoSp dst, fRegD src)
 7490 %{
 7491   match(Set dst (IsFiniteD src));
 7492 
 7493   format %{ "isFinite $dst, $src" %}
 7494   ins_encode %{
 7495     __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7496     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::fclass_mask::finite);
 7497     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7498   %}
 7499 
 7500   ins_pipe(pipe_class_default);
 7501 %}
 7502 
 7503 instruct divF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7504   match(Set dst (DivF src1  src2));
 7505 
 7506   ins_cost(FDIV_COST);
 7507   format %{ "fdiv.s  $dst, $src1, $src2\t#@divF_reg_reg" %}
 7508 
 7509   ins_encode %{
 7510     __ fdiv_s(as_FloatRegister($dst$$reg),
 7511               as_FloatRegister($src1$$reg),
 7512               as_FloatRegister($src2$$reg));
 7513   %}
 7514 
 7515   ins_pipe(fp_div_s);
 7516 %}
 7517 
 7518 instruct divD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7519   match(Set dst (DivD src1  src2));
 7520 
 7521   ins_cost(FDIV_COST);
 7522   format %{ "fdiv.d  $dst, $src1, $src2\t#@divD_reg_reg" %}
 7523 
 7524   ins_encode %{
 7525     __ fdiv_d(as_FloatRegister($dst$$reg),
 7526               as_FloatRegister($src1$$reg),
 7527               as_FloatRegister($src2$$reg));
 7528   %}
 7529 
 7530   ins_pipe(fp_div_d);
 7531 %}
 7532 
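      // FP negation and absolute value use the sign-injection instructions:
      // fsgnjn rd, rs, rs is fneg and fsgnjx rd, rs, rs is fabs.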
 7533 instruct negF_reg_reg(fRegF dst, fRegF src) %{
 7534   match(Set dst (NegF src));
 7535 
 7536   ins_cost(XFER_COST);
 7537   format %{ "fsgnjn.s  $dst, $src, $src\t#@negF_reg_reg" %}
 7538 
 7539   ins_encode %{
 7540     __ fneg_s(as_FloatRegister($dst$$reg),
 7541               as_FloatRegister($src$$reg));
 7542   %}
 7543 
 7544   ins_pipe(fp_uop_s);
 7545 %}
 7546 
 7547 instruct negD_reg_reg(fRegD dst, fRegD src) %{
 7548   match(Set dst (NegD src));
 7549 
 7550   ins_cost(XFER_COST);
 7551   format %{ "fsgnjn.d  $dst, $src, $src\t#@negD_reg_reg" %}
 7552 
 7553   ins_encode %{
 7554     __ fneg_d(as_FloatRegister($dst$$reg),
 7555               as_FloatRegister($src$$reg));
 7556   %}
 7557 
 7558   ins_pipe(fp_uop_d);
 7559 %}
 7560 
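      // Branch-free absolute value: t0 = src >> 31 is 0 for non-negative src and
      // -1 for negative src, so (src + t0) ^ t0 leaves non-negative values
      // unchanged and negates the rest.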
 7561 instruct absI_reg(iRegINoSp dst, iRegIorL2I src) %{
 7562   match(Set dst (AbsI src));
 7563 
 7564   ins_cost(ALU_COST * 3);
 7565   format %{
 7566     "sraiw  t0, $src, 0x1f\n\t"
 7567     "addw  $dst, $src, t0\n\t"
 7568     "xorr  $dst, $dst, t0\t#@absI_reg"
 7569   %}
 7570 
 7571   ins_encode %{
 7572     __ sraiw(t0, as_Register($src$$reg), 0x1f);
 7573     __ addw(as_Register($dst$$reg), as_Register($src$$reg), t0);
 7574     __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
 7575   %}
 7576 
 7577   ins_pipe(pipe_class_default);
 7578 %}
 7579 
 7580 instruct absL_reg(iRegLNoSp dst, iRegL src) %{
 7581   match(Set dst (AbsL src));
 7582 
 7583   ins_cost(ALU_COST * 3);
 7584   format %{
 7585     "srai  t0, $src, 0x3f\n\t"
 7586     "add  $dst, $src, t0\n\t"
 7587     "xorr  $dst, $dst, t0\t#@absL_reg"
 7588   %}
 7589 
 7590   ins_encode %{
 7591     __ srai(t0, as_Register($src$$reg), 0x3f);
 7592     __ add(as_Register($dst$$reg), as_Register($src$$reg), t0);
 7593     __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
 7594   %}
 7595 
 7596   ins_pipe(pipe_class_default);
 7597 %}
 7598 
 7599 instruct absF_reg(fRegF dst, fRegF src) %{
 7600   match(Set dst (AbsF src));
 7601 
 7602   ins_cost(XFER_COST);
 7603   format %{ "fsgnjx.s  $dst, $src, $src\t#@absF_reg" %}
 7604   ins_encode %{
 7605     __ fabs_s(as_FloatRegister($dst$$reg),
 7606               as_FloatRegister($src$$reg));
 7607   %}
 7608 
 7609   ins_pipe(fp_uop_s);
 7610 %}
 7611 
 7612 instruct absD_reg(fRegD dst, fRegD src) %{
 7613   match(Set dst (AbsD src));
 7614 
 7615   ins_cost(XFER_COST);
 7616   format %{ "fsgnjx.d  $dst, $src, $src\t#@absD_reg" %}
 7617   ins_encode %{
 7618     __ fabs_d(as_FloatRegister($dst$$reg),
 7619               as_FloatRegister($src$$reg));
 7620   %}
 7621 
 7622   ins_pipe(fp_uop_d);
 7623 %}
 7624 
 7625 instruct sqrtF_reg(fRegF dst, fRegF src) %{
 7626   match(Set dst (SqrtF src));
 7627 
 7628   ins_cost(FSQRT_COST);
 7629   format %{ "fsqrt.s  $dst, $src\t#@sqrtF_reg" %}
 7630   ins_encode %{
 7631     __ fsqrt_s(as_FloatRegister($dst$$reg),
 7632                as_FloatRegister($src$$reg));
 7633   %}
 7634 
 7635   ins_pipe(fp_sqrt_s);
 7636 %}
 7637 
 7638 instruct sqrtD_reg(fRegD dst, fRegD src) %{
 7639   match(Set dst (SqrtD src));
 7640 
 7641   ins_cost(FSQRT_COST);
 7642   format %{ "fsqrt.d  $dst, $src\t#@sqrtD_reg" %}
 7643   ins_encode %{
 7644     __ fsqrt_d(as_FloatRegister($dst$$reg),
 7645                as_FloatRegister($src$$reg));
 7646   %}
 7647 
 7648   ins_pipe(fp_sqrt_d);
 7649 %}
 7650 
 7651 // Round Instruction
 7652 instruct roundD_reg(fRegD dst, fRegD src, immI rmode, iRegLNoSp tmp1, iRegLNoSp tmp2, iRegLNoSp tmp3) %{
 7653   match(Set dst (RoundDoubleMode src rmode));
 7654   ins_cost(2 * XFER_COST + BRANCH_COST);
 7655   effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 7656 
 7657   format %{ "RoundDoubleMode $src, $rmode" %}
 7658   ins_encode %{
 7659     __ round_double_mode(as_FloatRegister($dst$$reg),
 7660                as_FloatRegister($src$$reg), $rmode$$constant, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 7661   %}
 7662   ins_pipe(pipe_class_default);
 7663 %}
 7664 
 7665 // Copysign and signum intrinsics
 7666 
 7667 instruct copySignD_reg(fRegD dst, fRegD src1, fRegD src2, immD zero) %{
 7668   match(Set dst (CopySignD src1 (Binary src2 zero)));
 7669   format %{ "CopySignD  $dst $src1 $src2" %}
 7670   ins_encode %{
 7671     FloatRegister dst = as_FloatRegister($dst$$reg),
 7672                   src1 = as_FloatRegister($src1$$reg),
 7673                   src2 = as_FloatRegister($src2$$reg);
 7674     __ fsgnj_d(dst, src1, src2);
 7675   %}
 7676   ins_pipe(fp_dop_reg_reg_d);
 7677 %}
 7678 
 7679 instruct copySignF_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7680   match(Set dst (CopySignF src1 src2));
 7681   format %{ "CopySignF  $dst $src1 $src2" %}
 7682   ins_encode %{
 7683     FloatRegister dst = as_FloatRegister($dst$$reg),
 7684                   src1 = as_FloatRegister($src1$$reg),
 7685                   src2 = as_FloatRegister($src2$$reg);
 7686     __ fsgnj_s(dst, src1, src2);
 7687   %}
 7688   ins_pipe(fp_dop_reg_reg_s);
 7689 %}
 7690 
 7691 instruct signumD_reg(fRegD dst, immD zero, fRegD one) %{
 7692   match(Set dst (SignumD dst (Binary zero one)));
 7693   format %{ "signumD  $dst, $dst" %}
 7694   ins_encode %{
 7695     __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), true /* is_double */);
 7696   %}
 7697   ins_pipe(pipe_class_default);
 7698 %}
 7699 
 7700 instruct signumF_reg(fRegF dst, immF zero, fRegF one) %{
 7701   match(Set dst (SignumF dst (Binary zero one)));
 7702   format %{ "signumF  $dst, $dst" %}
 7703   ins_encode %{
 7704     __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), false /* is_double */);
 7705   %}
 7706   ins_pipe(pipe_class_default);
 7707 %}
 7708 
 7709 // Arithmetic Instructions End
 7710 
 7711 // ============================================================================
 7712 // Logical Instructions
 7713 
 7714 // Register And
 7715 instruct andI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7716   match(Set dst (AndI src1 src2));
 7717 
 7718   format %{ "andr  $dst, $src1, $src2\t#@andI_reg_reg" %}
 7719 
 7720   ins_cost(ALU_COST);
 7721   ins_encode %{
 7722     __ andr(as_Register($dst$$reg),
 7723             as_Register($src1$$reg),
 7724             as_Register($src2$$reg));
 7725   %}
 7726 
 7727   ins_pipe(ialu_reg_reg);
 7728 %}
 7729 
 7730 // Immediate And
 7731 instruct andI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7732   match(Set dst (AndI src1 src2));
 7733 
 7734   format %{ "andi  $dst, $src1, $src2\t#@andI_reg_imm" %}
 7735 
 7736   ins_cost(ALU_COST);
 7737   ins_encode %{
 7738     __ andi(as_Register($dst$$reg),
 7739             as_Register($src1$$reg),
 7740             (int32_t)($src2$$constant));
 7741   %}
 7742 
 7743   ins_pipe(ialu_reg_imm);
 7744 %}
 7745 
 7746 // Register Or
 7747 instruct orI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7748   match(Set dst (OrI src1 src2));
 7749 
 7750   format %{ "orr  $dst, $src1, $src2\t#@orI_reg_reg" %}
 7751 
 7752   ins_cost(ALU_COST);
 7753   ins_encode %{
 7754     __ orr(as_Register($dst$$reg),
 7755            as_Register($src1$$reg),
 7756            as_Register($src2$$reg));
 7757   %}
 7758 
 7759   ins_pipe(ialu_reg_reg);
 7760 %}
 7761 
 7762 // Immediate Or
 7763 instruct orI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7764   match(Set dst (OrI src1 src2));
 7765 
 7766   format %{ "ori  $dst, $src1, $src2\t#@orI_reg_imm" %}
 7767 
 7768   ins_cost(ALU_COST);
 7769   ins_encode %{
 7770     __ ori(as_Register($dst$$reg),
 7771            as_Register($src1$$reg),
 7772            (int32_t)($src2$$constant));
 7773   %}
 7774 
 7775   ins_pipe(ialu_reg_imm);
 7776 %}
 7777 
 7778 // Register Xor
 7779 instruct xorI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7780   match(Set dst (XorI src1 src2));
 7781 
 7782   format %{ "xorr  $dst, $src1, $src2\t#@xorI_reg_reg" %}
 7783 
 7784   ins_cost(ALU_COST);
 7785   ins_encode %{
 7786     __ xorr(as_Register($dst$$reg),
 7787             as_Register($src1$$reg),
 7788             as_Register($src2$$reg));
 7789   %}
 7790 
 7791   ins_pipe(ialu_reg_reg);
 7792 %}
 7793 
 7794 // Immediate Xor
 7795 instruct xorI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7796   match(Set dst (XorI src1 src2));
 7797 
 7798   format %{ "xori  $dst, $src1, $src2\t#@xorI_reg_imm" %}
 7799 
 7800   ins_cost(ALU_COST);
 7801   ins_encode %{
 7802     __ xori(as_Register($dst$$reg),
 7803             as_Register($src1$$reg),
 7804             (int32_t)($src2$$constant));
 7805   %}
 7806 
 7807   ins_pipe(ialu_reg_imm);
 7808 %}
 7809 
 7810 // Register And Long
 7811 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7812   match(Set dst (AndL src1 src2));
 7813 
 7814   format %{ "andr  $dst, $src1, $src2\t#@andL_reg_reg" %}
 7815 
 7816   ins_cost(ALU_COST);
 7817   ins_encode %{
 7818     __ andr(as_Register($dst$$reg),
 7819             as_Register($src1$$reg),
 7820             as_Register($src2$$reg));
 7821   %}
 7822 
 7823   ins_pipe(ialu_reg_reg);
 7824 %}
 7825 
 7826 // Immediate And Long
 7827 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7828   match(Set dst (AndL src1 src2));
 7829 
 7830   format %{ "andi  $dst, $src1, $src2\t#@andL_reg_imm" %}
 7831 
 7832   ins_cost(ALU_COST);
 7833   ins_encode %{
 7834     __ andi(as_Register($dst$$reg),
 7835             as_Register($src1$$reg),
 7836             (int32_t)($src2$$constant));
 7837   %}
 7838 
 7839   ins_pipe(ialu_reg_imm);
 7840 %}
 7841 
 7842 // Register Or Long
 7843 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7844   match(Set dst (OrL src1 src2));
 7845 
 7846   format %{ "orr  $dst, $src1, $src2\t#@orL_reg_reg" %}
 7847 
 7848   ins_cost(ALU_COST);
 7849   ins_encode %{
 7850     __ orr(as_Register($dst$$reg),
 7851            as_Register($src1$$reg),
 7852            as_Register($src2$$reg));
 7853   %}
 7854 
 7855   ins_pipe(ialu_reg_reg);
 7856 %}
 7857 
 7858 // Immediate Or Long
 7859 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7860   match(Set dst (OrL src1 src2));
 7861 
 7862   format %{ "ori  $dst, $src1, $src2\t#@orL_reg_imm" %}
 7863 
 7864   ins_cost(ALU_COST);
 7865   ins_encode %{
 7866     __ ori(as_Register($dst$$reg),
 7867            as_Register($src1$$reg),
 7868            (int32_t)($src2$$constant));
 7869   %}
 7870 
 7871   ins_pipe(ialu_reg_imm);
 7872 %}
 7873 
 7874 // Register Xor Long
 7875 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7876   match(Set dst (XorL src1 src2));
 7877 
 7878   format %{ "xorr  $dst, $src1, $src2\t#@xorL_reg_reg" %}
 7879 
 7880   ins_cost(ALU_COST);
 7881   ins_encode %{
 7882     __ xorr(as_Register($dst$$reg),
 7883             as_Register($src1$$reg),
 7884             as_Register($src2$$reg));
 7885   %}
 7886 
 7887   ins_pipe(ialu_reg_reg);
 7888 %}
 7889 
 7890 // Immediate Xor Long
 7891 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7892   match(Set dst (XorL src1 src2));
 7893 
 7894   ins_cost(ALU_COST);
 7895   format %{ "xori  $dst, $src1, $src2\t#@xorL_reg_imm" %}
 7896 
 7897   ins_encode %{
 7898     __ xori(as_Register($dst$$reg),
 7899             as_Register($src1$$reg),
 7900             (int32_t)($src2$$constant));
 7901   %}
 7902 
 7903   ins_pipe(ialu_reg_imm);
 7904 %}
 7905 
 7906 // ============================================================================
 7907 // MemBar Instruction
 7908 
 7909 instruct load_fence() %{
 7910   match(LoadFence);
 7911   ins_cost(ALU_COST);
 7912 
 7913   format %{ "#@load_fence" %}
 7914 
 7915   ins_encode %{
 7916     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
 7917   %}
 7918   ins_pipe(pipe_serial);
 7919 %}
 7920 
 7921 instruct membar_acquire() %{
 7922   match(MemBarAcquire);
 7923   ins_cost(ALU_COST);
 7924 
 7925   format %{ "#@membar_acquire\n\t"
 7926             "fence ir iorw" %}
 7927 
 7928   ins_encode %{
 7929     __ block_comment("membar_acquire");
 7930     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
 7931   %}
 7932 
 7933   ins_pipe(pipe_serial);
 7934 %}
 7935 
 7936 instruct membar_acquire_lock() %{
 7937   match(MemBarAcquireLock);
 7938   ins_cost(0);
 7939 
 7940   format %{ "#@membar_acquire_lock (elided)" %}
 7941 
 7942   ins_encode %{
 7943     __ block_comment("membar_acquire_lock (elided)");
 7944   %}
 7945 
 7946   ins_pipe(pipe_serial);
 7947 %}
 7948 
 7949 instruct store_fence() %{
 7950   match(StoreFence);
 7951   ins_cost(ALU_COST);
 7952 
 7953   format %{ "#@store_fence" %}
 7954 
 7955   ins_encode %{
 7956     __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
 7957   %}
 7958   ins_pipe(pipe_serial);
 7959 %}
 7960 
 7961 instruct membar_release() %{
 7962   match(MemBarRelease);
 7963   ins_cost(ALU_COST);
 7964 
 7965   format %{ "#@membar_release\n\t"
 7966             "fence iorw ow" %}
 7967 
 7968   ins_encode %{
 7969     __ block_comment("membar_release");
 7970     __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
 7971   %}
 7972   ins_pipe(pipe_serial);
 7973 %}
 7974 
 7975 instruct membar_storestore() %{
 7976   match(MemBarStoreStore);
 7977   match(StoreStoreFence);
 7978   ins_cost(ALU_COST);
 7979 
 7980   format %{ "MEMBAR-store-store\t#@membar_storestore" %}
 7981 
 7982   ins_encode %{
 7983     __ membar(MacroAssembler::StoreStore);
 7984   %}
 7985   ins_pipe(pipe_serial);
 7986 %}
 7987 
 7988 instruct membar_release_lock() %{
 7989   match(MemBarReleaseLock);
 7990   ins_cost(0);
 7991 
 7992   format %{ "#@membar_release_lock (elided)" %}
 7993 
 7994   ins_encode %{
 7995     __ block_comment("membar_release_lock (elided)");
 7996   %}
 7997 
 7998   ins_pipe(pipe_serial);
 7999 %}
 8000 
 8001 instruct membar_volatile() %{
 8002   match(MemBarVolatile);
 8003   ins_cost(ALU_COST);
 8004 
 8005   format %{ "#@membar_volatile\n\t"
 8006             "fence iorw iorw" %}
 8007 
 8008   ins_encode %{
 8009     __ block_comment("membar_volatile");
 8010     __ membar(MacroAssembler::StoreLoad);
 8011   %}
 8012 
 8013   ins_pipe(pipe_serial);
 8014 %}
 8015 
 8016 instruct spin_wait() %{
 8017   predicate(UseZihintpause);
 8018   match(OnSpinWait);
 8019   ins_cost(CACHE_MISS_COST);
 8020 
 8021   format %{ "spin_wait" %}
 8022 
 8023   ins_encode %{
 8024     __ pause();
 8025   %}
 8026 
 8027   ins_pipe(pipe_serial);
 8028 %}
 8029 
 8030 // ============================================================================
 8031 // Cast Instructions (Java-level type cast)
 8032 
 8033 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8034   match(Set dst (CastX2P src));
 8035 
 8036   ins_cost(ALU_COST);
 8037   format %{ "mv  $dst, $src\t# long -> ptr, #@castX2P" %}
 8038 
 8039   ins_encode %{
 8040     if ($dst$$reg != $src$$reg) {
 8041       __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 8042     }
 8043   %}
 8044 
 8045   ins_pipe(ialu_reg);
 8046 %}
 8047 
 8048 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8049   match(Set dst (CastP2X src));
 8050 
 8051   ins_cost(ALU_COST);
 8052   format %{ "mv  $dst, $src\t# ptr -> long, #@castP2X" %}
 8053 
 8054   ins_encode %{
 8055     if ($dst$$reg != $src$$reg) {
 8056       __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 8057     }
 8058   %}
 8059 
 8060   ins_pipe(ialu_reg);
 8061 %}
 8062 
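      // The remaining casts only carry type information for the optimizer;
      // they are zero-size and emit no code.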
 8063 instruct castPP(iRegPNoSp dst)
 8064 %{
 8065   match(Set dst (CastPP dst));
 8066   ins_cost(0);
 8067 
 8068   size(0);
 8069   format %{ "# castPP of $dst, #@castPP" %}
 8070   ins_encode(/* empty encoding */);
 8071   ins_pipe(pipe_class_empty);
 8072 %}
 8073 
 8074 instruct castLL(iRegL dst)
 8075 %{
 8076   match(Set dst (CastLL dst));
 8077 
 8078   size(0);
 8079   format %{ "# castLL of $dst, #@castLL" %}
 8080   ins_encode(/* empty encoding */);
 8081   ins_cost(0);
 8082   ins_pipe(pipe_class_empty);
 8083 %}
 8084 
 8085 instruct castII(iRegI dst)
 8086 %{
 8087   match(Set dst (CastII dst));
 8088 
 8089   size(0);
 8090   format %{ "# castII of $dst, #@castII" %}
 8091   ins_encode(/* empty encoding */);
 8092   ins_cost(0);
 8093   ins_pipe(pipe_class_empty);
 8094 %}
 8095 
 8096 instruct checkCastPP(iRegPNoSp dst)
 8097 %{
 8098   match(Set dst (CheckCastPP dst));
 8099 
 8100   size(0);
 8101   ins_cost(0);
 8102   format %{ "# checkcastPP of $dst, #@checkCastPP" %}
 8103   ins_encode(/* empty encoding */);
 8104   ins_pipe(pipe_class_empty);
 8105 %}
 8106 
 8107 instruct castFF(fRegF dst)
 8108 %{
 8109   match(Set dst (CastFF dst));
 8110 
 8111   size(0);
 8112   format %{ "# castFF of $dst" %}
 8113   ins_encode(/* empty encoding */);
 8114   ins_cost(0);
 8115   ins_pipe(pipe_class_empty);
 8116 %}
 8117 
 8118 instruct castDD(fRegD dst)
 8119 %{
 8120   match(Set dst (CastDD dst));
 8121 
 8122   size(0);
 8123   format %{ "# castDD of $dst" %}
 8124   ins_encode(/* empty encoding */);
 8125   ins_cost(0);
 8126   ins_pipe(pipe_class_empty);
 8127 %}
 8128 
 8129 instruct castVV(vReg dst)
 8130 %{
 8131   match(Set dst (CastVV dst));
 8132 
 8133   size(0);
 8134   format %{ "# castVV of $dst" %}
 8135   ins_encode(/* empty encoding */);
 8136   ins_cost(0);
 8137   ins_pipe(pipe_class_empty);
 8138 %}
 8139 
 8140 // ============================================================================
 8141 // Convert Instructions
 8142 
 8143 // int to bool
 8144 instruct convI2Bool(iRegINoSp dst, iRegI src)
 8145 %{
 8146   match(Set dst (Conv2B src));
 8147 
 8148   ins_cost(ALU_COST);
 8149   format %{ "snez  $dst, $src\t#@convI2Bool" %}
 8150 
 8151   ins_encode %{
 8152     __ snez(as_Register($dst$$reg), as_Register($src$$reg));
 8153   %}
 8154 
 8155   ins_pipe(ialu_reg);
 8156 %}
 8157 
 8158 // pointer to bool
 8159 instruct convP2Bool(iRegINoSp dst, iRegP src)
 8160 %{
 8161   match(Set dst (Conv2B src));
 8162 
 8163   ins_cost(ALU_COST);
 8164   format %{ "snez  $dst, $src\t#@convP2Bool" %}
 8165 
 8166   ins_encode %{
 8167     __ snez(as_Register($dst$$reg), as_Register($src$$reg));
 8168   %}
 8169 
 8170   ins_pipe(ialu_reg);
 8171 %}
 8172 
 8173 // int <-> long
 8174 
 8175 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
 8176 %{
 8177   match(Set dst (ConvI2L src));
 8178 
 8179   ins_cost(ALU_COST);
 8180   format %{ "addw  $dst, $src, zr\t#@convI2L_reg_reg" %}
 8181   ins_encode %{
 8182     __ sign_extend(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8183   %}
 8184   ins_pipe(ialu_reg);
 8185 %}
 8186 
 8187 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
 8188   match(Set dst (ConvL2I src));
 8189 
 8190   ins_cost(ALU_COST);
 8191   format %{ "addw  $dst, $src, zr\t#@convL2I_reg" %}
 8192 
 8193   ins_encode %{
 8194     __ sign_extend(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8195   %}
 8196 
 8197   ins_pipe(ialu_reg);
 8198 %}
 8199 
 8200 // int to unsigned long (Zero-extend)
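      // Matching the AndL of the sign-extended value against the 32-bit mask
      // lets the i2l conversion and the mask fold into a single zero_extend.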
 8201 instruct convI2UL_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
 8202 %{
 8203   match(Set dst (AndL (ConvI2L src) mask));
 8204 
 8205   ins_cost(ALU_COST * 2);
 8206   format %{ "zero_extend $dst, $src, 32\t# i2ul, #@convI2UL_reg_reg" %}
 8207 
 8208   ins_encode %{
 8209     __ zero_extend(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8210   %}
 8211 
 8212   ins_pipe(ialu_reg_shift);
 8213 %}
 8214 
 8215 // float <-> double
 8216 
 8217 instruct convF2D_reg(fRegD dst, fRegF src) %{
 8218   match(Set dst (ConvF2D src));
 8219 
 8220   ins_cost(XFER_COST);
 8221   format %{ "fcvt.d.s  $dst, $src\t#@convF2D_reg" %}
 8222 
 8223   ins_encode %{
 8224     __ fcvt_d_s(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
 8225   %}
 8226 
 8227   ins_pipe(fp_f2d);
 8228 %}
 8229 
 8230 instruct convD2F_reg(fRegF dst, fRegD src) %{
 8231   match(Set dst (ConvD2F src));
 8232 
 8233   ins_cost(XFER_COST);
 8234   format %{ "fcvt.s.d  $dst, $src\t#@convD2F_reg" %}
 8235 
 8236   ins_encode %{
 8237     __ fcvt_s_d(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
 8238   %}
 8239 
 8240   ins_pipe(fp_d2f);
 8241 %}
 8242 
 8243 // single <-> half precision
 8244 
 8245 instruct convHF2F_reg_reg(fRegF dst, iRegINoSp src, iRegINoSp tmp) %{
 8246   match(Set dst (ConvHF2F src));
 8247   effect(TEMP tmp);
 8248   format %{ "fmv.h.x $dst, $src\t# move source from $src to $dst\n\t"
 8249             "fcvt.s.h $dst, $dst\t# convert half to single precision"
 8250   %}
 8251   ins_encode %{
 8252     __ float16_to_float($dst$$FloatRegister, $src$$Register, $tmp$$Register);
 8253   %}
 8254   ins_pipe(pipe_slow);
 8255 %}
 8256 
 8257 instruct convF2HF_reg_reg(iRegINoSp dst, fRegF src, fRegF ftmp, iRegINoSp xtmp) %{
 8258   match(Set dst (ConvF2HF src));
 8259   effect(TEMP_DEF dst, TEMP ftmp, TEMP xtmp);
 8260   format %{ "fcvt.h.s $ftmp, $src\t# convert single precision to half\n\t"
 8261             "fmv.x.h $dst, $ftmp\t# move result from $ftmp to $dst"
 8262   %}
 8263   ins_encode %{
 8264     __ float_to_float16($dst$$Register, $src$$FloatRegister, $ftmp$$FloatRegister, $xtmp$$Register);
 8265   %}
 8266   ins_pipe(pipe_slow);
 8267 %}
 8268 
 8269 // float <-> int
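      // Java requires NaN to convert to 0 and out-of-range values to saturate;
      // the *_safe helpers used below presumably wrap the raw fcvt instructions
      // to provide that behaviour.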
 8270 
 8271 instruct convF2I_reg_reg(iRegINoSp dst, fRegF src) %{
 8272   match(Set dst (ConvF2I src));
 8273 
 8274   ins_cost(XFER_COST);
 8275   format %{ "fcvt.w.s  $dst, $src\t#@convF2I_reg_reg" %}
 8276 
 8277   ins_encode %{
 8278     __ fcvt_w_s_safe($dst$$Register, $src$$FloatRegister);
 8279   %}
 8280 
 8281   ins_pipe(fp_f2i);
 8282 %}
 8283 
 8284 instruct convI2F_reg_reg(fRegF dst, iRegIorL2I src) %{
 8285   match(Set dst (ConvI2F src));
 8286 
 8287   ins_cost(XFER_COST);
 8288   format %{ "fcvt.s.w  $dst, $src\t#@convI2F_reg_reg" %}
 8289 
 8290   ins_encode %{
 8291     __ fcvt_s_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8292   %}
 8293 
 8294   ins_pipe(fp_i2f);
 8295 %}
 8296 
 8297 // float <-> long
 8298 
 8299 instruct convF2L_reg_reg(iRegLNoSp dst, fRegF src) %{
 8300   match(Set dst (ConvF2L src));
 8301 
 8302   ins_cost(XFER_COST);
 8303   format %{ "fcvt.l.s  $dst, $src\t#@convF2L_reg_reg" %}
 8304 
 8305   ins_encode %{
 8306     __ fcvt_l_s_safe($dst$$Register, $src$$FloatRegister);
 8307   %}
 8308 
 8309   ins_pipe(fp_f2l);
 8310 %}
 8311 
 8312 instruct convL2F_reg_reg(fRegF dst, iRegL src) %{
 8313   match(Set dst (ConvL2F src));
 8314 
 8315   ins_cost(XFER_COST);
 8316   format %{ "fcvt.s.l  $dst, $src\t#@convL2F_reg_reg" %}
 8317 
 8318   ins_encode %{
 8319     __ fcvt_s_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8320   %}
 8321 
 8322   ins_pipe(fp_l2f);
 8323 %}
 8324 
 8325 // double <-> int
 8326 
 8327 instruct convD2I_reg_reg(iRegINoSp dst, fRegD src) %{
 8328   match(Set dst (ConvD2I src));
 8329 
 8330   ins_cost(XFER_COST);
 8331   format %{ "fcvt.w.d  $dst, $src\t#@convD2I_reg_reg" %}
 8332 
 8333   ins_encode %{
 8334     __ fcvt_w_d_safe($dst$$Register, $src$$FloatRegister);
 8335   %}
 8336 
 8337   ins_pipe(fp_d2i);
 8338 %}
 8339 
 8340 instruct convI2D_reg_reg(fRegD dst, iRegIorL2I src) %{
 8341   match(Set dst (ConvI2D src));
 8342 
 8343   ins_cost(XFER_COST);
 8344   format %{ "fcvt.d.w  $dst, $src\t#@convI2D_reg_reg" %}
 8345 
 8346   ins_encode %{
 8347     __ fcvt_d_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8348   %}
 8349 
 8350   ins_pipe(fp_i2d);
 8351 %}
 8352 
 8353 // double <-> long
 8354 
 8355 instruct convD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
 8356   match(Set dst (ConvD2L src));
 8357 
 8358   ins_cost(XFER_COST);
 8359   format %{ "fcvt.l.d  $dst, $src\t#@convD2L_reg_reg" %}
 8360 
 8361   ins_encode %{
 8362     __ fcvt_l_d_safe($dst$$Register, $src$$FloatRegister);
 8363   %}
 8364 
 8365   ins_pipe(fp_d2l);
 8366 %}
 8367 
 8368 instruct convL2D_reg_reg(fRegD dst, iRegL src) %{
 8369   match(Set dst (ConvL2D src));
 8370 
 8371   ins_cost(XFER_COST);
 8372   format %{ "fcvt.d.l  $dst, $src\t#@convL2D_reg_reg" %}
 8373 
 8374   ins_encode %{
 8375     __ fcvt_d_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8376   %}
 8377 
 8378   ins_pipe(fp_l2d);
 8379 %}
 8380 
 8381 // Convert oop into int for vectors alignment masking
 8382 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8383   match(Set dst (ConvL2I (CastP2X src)));
 8384 
 8385   ins_cost(ALU_COST * 2);
 8386   format %{ "zero_extend $dst, $src, 32\t# ptr -> int, #@convP2I" %}
 8387 
 8388   ins_encode %{
 8389     __ zero_extend($dst$$Register, $src$$Register, 32);
 8390   %}
 8391 
 8392   ins_pipe(ialu_reg);
 8393 %}
 8394 
 8395 // Convert compressed oop into int for vectors alignment masking
// in case of 32-bit oops (heap < 4GB).
 8397 instruct convN2I(iRegINoSp dst, iRegN src)
 8398 %{
 8399   predicate(CompressedOops::shift() == 0);
 8400   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8401 
 8402   ins_cost(ALU_COST);
 8403   format %{ "mv  $dst, $src\t# compressed ptr -> int, #@convN2I" %}
 8404 
 8405   ins_encode %{
 8406     __ mv($dst$$Register, $src$$Register);
 8407   %}
 8408 
 8409   ins_pipe(ialu_reg);
 8410 %}
 8411 
 8412 instruct round_double_reg(iRegLNoSp dst, fRegD src, fRegD ftmp) %{
 8413   match(Set dst (RoundD src));
 8414 
 8415   ins_cost(XFER_COST + BRANCH_COST);
 8416   effect(TEMP ftmp);
 8417   format %{ "java_round_double $dst, $src\t#@round_double_reg" %}
 8418 
 8419   ins_encode %{
 8420     __ java_round_double($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
 8421   %}
 8422 
 8423   ins_pipe(pipe_slow);
 8424 %}
 8425 
 8426 instruct round_float_reg(iRegINoSp dst, fRegF src, fRegF ftmp) %{
 8427   match(Set dst (RoundF src));
 8428 
 8429   ins_cost(XFER_COST + BRANCH_COST);
 8430   effect(TEMP ftmp);
 8431   format %{ "java_round_float $dst, $src\t#@round_float_reg" %}
 8432 
 8433   ins_encode %{
 8434     __ java_round_float($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
 8435   %}
 8436 
 8437   ins_pipe(pipe_slow);
 8438 %}
 8439 
 8440 // Convert oop pointer into compressed form
 8441 instruct encodeHeapOop(iRegNNoSp dst, iRegP src) %{
 8442   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
 8443   match(Set dst (EncodeP src));
 8444   ins_cost(ALU_COST);
 8445   format %{ "encode_heap_oop  $dst, $src\t#@encodeHeapOop" %}
 8446   ins_encode %{
 8447     Register s = $src$$Register;
 8448     Register d = $dst$$Register;
 8449     __ encode_heap_oop(d, s);
 8450   %}
 8451   ins_pipe(pipe_class_default);
 8452 %}
 8453 
 8454 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src) %{
 8455   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
 8456   match(Set dst (EncodeP src));
 8457   ins_cost(ALU_COST);
 8458   format %{ "encode_heap_oop_not_null $dst, $src\t#@encodeHeapOop_not_null" %}
 8459   ins_encode %{
 8460     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
 8461   %}
 8462   ins_pipe(pipe_class_default);
 8463 %}
 8464 
 8465 instruct decodeHeapOop(iRegPNoSp dst, iRegN src) %{
 8466   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
 8467             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
 8468   match(Set dst (DecodeN src));
 8469 
 8470   ins_cost(0);
 8471   format %{ "decode_heap_oop  $dst, $src\t#@decodeHeapOop" %}
 8472   ins_encode %{
 8473     Register s = $src$$Register;
 8474     Register d = $dst$$Register;
 8475     __ decode_heap_oop(d, s);
 8476   %}
 8477   ins_pipe(pipe_class_default);
 8478 %}
 8479 
 8480 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src) %{
 8481   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
 8482             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
 8483   match(Set dst (DecodeN src));
 8484 
 8485   ins_cost(0);
 8486   format %{ "decode_heap_oop_not_null $dst, $src\t#@decodeHeapOop_not_null" %}
 8487   ins_encode %{
 8488     Register s = $src$$Register;
 8489     Register d = $dst$$Register;
 8490     __ decode_heap_oop_not_null(d, s);
 8491   %}
 8492   ins_pipe(pipe_class_default);
 8493 %}
 8494 
 8495 // Convert klass pointer into compressed form.
 8496 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
 8497   match(Set dst (EncodePKlass src));
 8498 
 8499   ins_cost(ALU_COST);
 8500   format %{ "encode_klass_not_null  $dst, $src\t#@encodeKlass_not_null" %}
 8501 
 8502   ins_encode %{
 8503     Register src_reg = as_Register($src$$reg);
 8504     Register dst_reg = as_Register($dst$$reg);
 8505     __ encode_klass_not_null(dst_reg, src_reg, t0);
 8506   %}
 8507 
  ins_pipe(pipe_class_default);
 8509 %}
 8510 
 8511 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src, iRegPNoSp tmp) %{
 8512   match(Set dst (DecodeNKlass src));
 8513 
 8514   effect(TEMP tmp);
 8515 
 8516   ins_cost(ALU_COST);
 8517   format %{ "decode_klass_not_null  $dst, $src\t#@decodeKlass_not_null" %}
 8518 
 8519   ins_encode %{
 8520     Register src_reg = as_Register($src$$reg);
 8521     Register dst_reg = as_Register($dst$$reg);
 8522     Register tmp_reg = as_Register($tmp$$reg);
 8523     __ decode_klass_not_null(dst_reg, src_reg, tmp_reg);
 8524   %}
 8525 
  ins_pipe(pipe_class_default);
 8527 %}
 8528 
 8529 // stack <-> reg and reg <-> reg shuffles with no conversion
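//
// These are raw bit-pattern moves (Float.floatToRawIntBits / Float.intBitsToFloat
// style reinterpretation): the 32 or 64 bits are transferred unchanged, never
// converted.  Illustrative sketch of the two shapes below, with invented register
// names and stack offset:
//   flw      f10, 16(sp)      // MoveI2F via a spill slot
//   fmv.w.x  f10, x11         // MoveI2F when both values already sit in registers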
 8530 
 8531 instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
 8532 
 8533   match(Set dst (MoveF2I src));
 8534 
 8535   effect(DEF dst, USE src);
 8536 
 8537   ins_cost(LOAD_COST);
 8538 
 8539   format %{ "lw  $dst, $src\t#@MoveF2I_stack_reg" %}
 8540 
 8541   ins_encode %{
 8542     __ lw(as_Register($dst$$reg), Address(sp, $src$$disp));
 8543   %}
 8544 
 8545   ins_pipe(iload_reg_reg);
 8546 
 8547 %}
 8548 
 8549 instruct MoveI2F_stack_reg(fRegF dst, stackSlotI src) %{
 8550 
 8551   match(Set dst (MoveI2F src));
 8552 
 8553   effect(DEF dst, USE src);
 8554 
 8555   ins_cost(LOAD_COST);
 8556 
 8557   format %{ "flw  $dst, $src\t#@MoveI2F_stack_reg" %}
 8558 
 8559   ins_encode %{
 8560     __ flw(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
 8561   %}
 8562 
 8563   ins_pipe(fp_load_mem_s);
 8564 
 8565 %}
 8566 
 8567 instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
 8568 
 8569   match(Set dst (MoveD2L src));
 8570 
 8571   effect(DEF dst, USE src);
 8572 
 8573   ins_cost(LOAD_COST);
 8574 
 8575   format %{ "ld  $dst, $src\t#@MoveD2L_stack_reg" %}
 8576 
 8577   ins_encode %{
 8578     __ ld(as_Register($dst$$reg), Address(sp, $src$$disp));
 8579   %}
 8580 
 8581   ins_pipe(iload_reg_reg);
 8582 
 8583 %}
 8584 
 8585 instruct MoveL2D_stack_reg(fRegD dst, stackSlotL src) %{
 8586 
 8587   match(Set dst (MoveL2D src));
 8588 
 8589   effect(DEF dst, USE src);
 8590 
 8591   ins_cost(LOAD_COST);
 8592 
 8593   format %{ "fld  $dst, $src\t#@MoveL2D_stack_reg" %}
 8594 
 8595   ins_encode %{
 8596     __ fld(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
 8597   %}
 8598 
 8599   ins_pipe(fp_load_mem_d);
 8600 
 8601 %}
 8602 
 8603 instruct MoveF2I_reg_stack(stackSlotI dst, fRegF src) %{
 8604 
 8605   match(Set dst (MoveF2I src));
 8606 
 8607   effect(DEF dst, USE src);
 8608 
 8609   ins_cost(STORE_COST);
 8610 
 8611   format %{ "fsw  $src, $dst\t#@MoveF2I_reg_stack" %}
 8612 
 8613   ins_encode %{
 8614     __ fsw(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
 8615   %}
 8616 
 8617   ins_pipe(fp_store_reg_s);
 8618 
 8619 %}
 8620 
 8621 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
 8622 
 8623   match(Set dst (MoveI2F src));
 8624 
 8625   effect(DEF dst, USE src);
 8626 
 8627   ins_cost(STORE_COST);
 8628 
 8629   format %{ "sw  $src, $dst\t#@MoveI2F_reg_stack" %}
 8630 
 8631   ins_encode %{
 8632     __ sw(as_Register($src$$reg), Address(sp, $dst$$disp));
 8633   %}
 8634 
 8635   ins_pipe(istore_reg_reg);
 8636 
 8637 %}
 8638 
 8639 instruct MoveD2L_reg_stack(stackSlotL dst, fRegD src) %{
 8640 
 8641   match(Set dst (MoveD2L src));
 8642 
 8643   effect(DEF dst, USE src);
 8644 
 8645   ins_cost(STORE_COST);
 8646 
  format %{ "fsd  $src, $dst\t#@MoveD2L_reg_stack" %}
 8648 
 8649   ins_encode %{
 8650     __ fsd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
 8651   %}
 8652 
 8653   ins_pipe(fp_store_reg_d);
 8654 
 8655 %}
 8656 
 8657 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
 8658 
 8659   match(Set dst (MoveL2D src));
 8660 
 8661   effect(DEF dst, USE src);
 8662 
 8663   ins_cost(STORE_COST);
 8664 
 8665   format %{ "sd  $src, $dst\t#@MoveL2D_reg_stack" %}
 8666 
 8667   ins_encode %{
 8668     __ sd(as_Register($src$$reg), Address(sp, $dst$$disp));
 8669   %}
 8670 
 8671   ins_pipe(istore_reg_reg);
 8672 
 8673 %}
 8674 
 8675 instruct MoveF2I_reg_reg(iRegINoSp dst, fRegF src) %{
 8676 
 8677   match(Set dst (MoveF2I src));
 8678 
 8679   effect(DEF dst, USE src);
 8680 
 8681   ins_cost(FMVX_COST);
 8682 
 8683   format %{ "fmv.x.w  $dst, $src\t#@MoveF2I_reg_reg" %}
 8684 
 8685   ins_encode %{
 8686     __ fmv_x_w(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 8687   %}
 8688 
 8689   ins_pipe(fp_f2i);
 8690 
 8691 %}
 8692 
 8693 instruct MoveI2F_reg_reg(fRegF dst, iRegI src) %{
 8694 
 8695   match(Set dst (MoveI2F src));
 8696 
 8697   effect(DEF dst, USE src);
 8698 
 8699   ins_cost(FMVX_COST);
 8700 
 8701   format %{ "fmv.w.x  $dst, $src\t#@MoveI2F_reg_reg" %}
 8702 
 8703   ins_encode %{
 8704     __ fmv_w_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8705   %}
 8706 
 8707   ins_pipe(fp_i2f);
 8708 
 8709 %}
 8710 
 8711 instruct MoveD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
 8712 
 8713   match(Set dst (MoveD2L src));
 8714 
 8715   effect(DEF dst, USE src);
 8716 
 8717   ins_cost(FMVX_COST);
 8718 
 8719   format %{ "fmv.x.d $dst, $src\t#@MoveD2L_reg_reg" %}
 8720 
 8721   ins_encode %{
 8722     __ fmv_x_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 8723   %}
 8724 
 8725   ins_pipe(fp_d2l);
 8726 
 8727 %}
 8728 
 8729 instruct MoveL2D_reg_reg(fRegD dst, iRegL src) %{
 8730 
 8731   match(Set dst (MoveL2D src));
 8732 
 8733   effect(DEF dst, USE src);
 8734 
 8735   ins_cost(FMVX_COST);
 8736 
 8737   format %{ "fmv.d.x  $dst, $src\t#@MoveL2D_reg_reg" %}
 8738 
 8739   ins_encode %{
 8740     __ fmv_d_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8741   %}
 8742 
 8743   ins_pipe(fp_l2d);
 8744 
 8745 %}
 8746 
 8747 // ============================================================================
// Compare instructions which set the result of a float comparison in the destination register.
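//
// Worked example (illustrative) for the cmpF3 sequence shown below: with
// op1 = 1.0f and op2 = 2.0f, flt.s(op2, op1) leaves 0, the branch falls through,
// feq.s leaves 0 and the addi produces -1; with a NaN operand both flt.s and
// feq.s leave 0, so the result is likewise -1 (unordered compares as less than).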
 8749 
 8750 instruct cmpF3_reg_reg(iRegINoSp dst, fRegF op1, fRegF op2)
 8751 %{
 8752   match(Set dst (CmpF3 op1 op2));
 8753 
 8754   ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
 8755   format %{ "flt.s  $dst, $op2, $op1\t#@cmpF3_reg_reg\n\t"
 8756             "bgtz   $dst, done\n\t"
 8757             "feq.s  $dst, $op1, $op2\n\t"
 8758             "addi   $dst, $dst, -1\n\t"
 8759             "done:"
 8760   %}
 8761 
 8762   ins_encode %{
 8763     // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
 8764     __ float_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg),
 8765                      as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
 8766   %}
 8767 
 8768   ins_pipe(pipe_class_default);
 8769 %}
 8770 
 8771 instruct cmpD3_reg_reg(iRegINoSp dst, fRegD op1, fRegD op2)
 8772 %{
 8773   match(Set dst (CmpD3 op1 op2));
 8774 
 8775   ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
 8776   format %{ "flt.d  $dst, $op2, $op1\t#@cmpD3_reg_reg\n\t"
 8777             "bgtz   $dst, done\n\t"
 8778             "feq.d  $dst, $op1, $op2\n\t"
 8779             "addi   $dst, $dst, -1\n\t"
 8780             "done:"
 8781   %}
 8782 
 8783   ins_encode %{
 8784     // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
 8785     __ double_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
 8786   %}
 8787 
 8788   ins_pipe(pipe_class_default);
 8789 %}
 8790 
 8791 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
 8792 %{
 8793   match(Set dst (CmpL3 op1 op2));
 8794 
 8795   ins_cost(ALU_COST * 3 + BRANCH_COST);
 8796   format %{ "slt   $dst, $op2, $op1\t#@cmpL3_reg_reg\n\t"
 8797             "bnez  $dst, done\n\t"
 8798             "slt   $dst, $op1, $op2\n\t"
 8799             "neg   $dst, $dst\n\t"
 8800             "done:"
 8801   %}
 8802   ins_encode %{
 8803     __ cmp_l2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 8804     __ mv(as_Register($dst$$reg), t0);
 8805   %}
 8806 
 8807   ins_pipe(pipe_class_default);
 8808 %}
 8809 
 8810 instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
 8811 %{
 8812   match(Set dst (CmpUL3 op1 op2));
 8813 
 8814   ins_cost(ALU_COST * 3 + BRANCH_COST);
 8815   format %{ "sltu  $dst, $op2, $op1\t#@cmpUL3_reg_reg\n\t"
 8816             "bnez  $dst, done\n\t"
 8817             "sltu  $dst, $op1, $op2\n\t"
 8818             "neg   $dst, $dst\n\t"
 8819             "done:"
 8820   %}
 8821   ins_encode %{
 8822     __ cmp_ul2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 8823     __ mv(as_Register($dst$$reg), t0);
 8824   %}
 8825 
 8826   ins_pipe(pipe_class_default);
 8827 %}
 8828 
 8829 instruct cmpU3_reg_reg(iRegINoSp dst, iRegI op1, iRegI op2)
 8830 %{
 8831   match(Set dst (CmpU3 op1 op2));
 8832 
 8833   ins_cost(ALU_COST * 3 + BRANCH_COST);
 8834   format %{ "sltu  $dst, $op2, $op1\t#@cmpU3_reg_reg\n\t"
 8835             "bnez  $dst, done\n\t"
 8836             "sltu  $dst, $op1, $op2\n\t"
 8837             "neg   $dst, $dst\n\t"
 8838             "done:"
 8839   %}
 8840   ins_encode %{
 8841     __ cmp_uw2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 8842     __ mv(as_Register($dst$$reg), t0);
 8843   %}
 8844 
 8845   ins_pipe(pipe_class_default);
 8846 %}
 8847 
 8848 instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q)
 8849 %{
 8850   match(Set dst (CmpLTMask p q));
 8851 
 8852   ins_cost(2 * ALU_COST);
 8853 
 8854   format %{ "slt $dst, $p, $q\t#@cmpLTMask_reg_reg\n\t"
 8855             "subw $dst, zr, $dst\t#@cmpLTMask_reg_reg"
 8856   %}
 8857 
 8858   ins_encode %{
 8859     __ slt(as_Register($dst$$reg), as_Register($p$$reg), as_Register($q$$reg));
 8860     __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 8861   %}
 8862 
 8863   ins_pipe(ialu_reg_reg);
 8864 %}
 8865 
 8866 instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I op, immI0 zero)
 8867 %{
 8868   match(Set dst (CmpLTMask op zero));
 8869 
 8870   ins_cost(ALU_COST);
 8871 
  format %{ "sraiw $dst, $op, 31\t#@cmpLTMask_reg_zero" %}
 8873 
 8874   ins_encode %{
 8875     __ sraiw(as_Register($dst$$reg), as_Register($op$$reg), 31);
 8876   %}
 8877 
 8878   ins_pipe(ialu_reg_shift);
 8879 %}
 8880 
 8881 
 8882 // ============================================================================
 8883 // Max and Min
 8884 
 8885 instruct minI_reg_reg(iRegINoSp dst, iRegI src)
 8886 %{
 8887   match(Set dst (MinI dst src));
 8888 
 8889   ins_cost(BRANCH_COST + ALU_COST);
 8890   format %{
 8891     "ble $dst, $src, skip\t#@minI_reg_reg\n\t"
 8892     "mv  $dst, $src\n\t"
 8893     "skip:"
 8894   %}
 8895 
 8896   ins_encode %{
 8897     Label Lskip;
 8898     __ ble(as_Register($dst$$reg), as_Register($src$$reg), Lskip);
 8899     __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 8900     __ bind(Lskip);
 8901   %}
 8902 
 8903   ins_pipe(pipe_class_compare);
 8904 %}
 8905 
 8906 instruct maxI_reg_reg(iRegINoSp dst, iRegI src)
 8907 %{
 8908   match(Set dst (MaxI dst src));
 8909 
 8910   ins_cost(BRANCH_COST + ALU_COST);
 8911   format %{
 8912     "bge $dst, $src, skip\t#@maxI_reg_reg\n\t"
 8913     "mv  $dst, $src\n\t"
 8914     "skip:"
 8915   %}
 8916 
 8917   ins_encode %{
 8918     Label Lskip;
 8919     __ bge(as_Register($dst$$reg), as_Register($src$$reg), Lskip);
 8920     __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 8921     __ bind(Lskip);
 8922   %}
 8923 
 8924   ins_pipe(pipe_class_compare);
 8925 %}
 8926 
 8927 // special case for comparing with zero
 8928 // n.b. this is selected in preference to the rule above because it
 8929 // avoids loading constant 0 into a source register
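//
// Illustrative sketch: without these rules MinI(dst, 0) would fall back to
// minI_rReg and the zero would typically have to be materialized first (e.g.
// `mv tN, zr`, where tN stands for whatever register the allocator picks),
// whereas minI_reg_zero/maxI_reg_zero compare against zr directly (blez/bgez).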
 8930 
 8931 instruct minI_reg_zero(iRegINoSp dst, immI0 zero)
 8932 %{
 8933   match(Set dst (MinI dst zero));
 8934   match(Set dst (MinI zero dst));
 8935 
 8936   ins_cost(BRANCH_COST + ALU_COST);
 8937   format %{
 8938     "blez $dst, skip\t#@minI_reg_zero\n\t"
 8939     "mv   $dst, zr\n\t"
 8940     "skip:"
 8941   %}
 8942 
 8943   ins_encode %{
 8944     Label Lskip;
 8945     __ blez(as_Register($dst$$reg), Lskip);
 8946     __ mv(as_Register($dst$$reg), zr);
 8947     __ bind(Lskip);
 8948   %}
 8949 
 8950   ins_pipe(pipe_class_compare);
 8951 %}
 8952 
 8953 instruct maxI_reg_zero(iRegINoSp dst, immI0 zero)
 8954 %{
 8955   match(Set dst (MaxI dst zero));
 8956   match(Set dst (MaxI zero dst));
 8957 
 8958   ins_cost(BRANCH_COST + ALU_COST);
 8959   format %{
 8960     "bgez $dst, skip\t#@maxI_reg_zero\n\t"
 8961     "mv   $dst, zr\n\t"
 8962     "skip:"
 8963   %}
 8964 
 8965   ins_encode %{
 8966     Label Lskip;
 8967     __ bgez(as_Register($dst$$reg), Lskip);
 8968     __ mv(as_Register($dst$$reg), zr);
 8969     __ bind(Lskip);
 8970   %}
 8971 
 8972   ins_pipe(pipe_class_compare);
 8973 %}
 8974 
 8975 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
 8976 %{
 8977   match(Set dst (MinI src1 src2));
 8978 
 8979   effect(DEF dst, USE src1, USE src2);
 8980 
 8981   ins_cost(BRANCH_COST + ALU_COST * 2);
 8982   format %{
 8983     "ble $src1, $src2, Lsrc1\t#@minI_rReg\n\t"
 8984     "mv $dst, $src2\n\t"
 8985     "j Ldone\n\t"
 8986     "Lsrc1:\n\t"
 8987     "mv $dst, $src1\n\t"
 8988     "Ldone:"
 8989   %}
 8990 
 8991   ins_encode %{
 8992     Label Lsrc1, Ldone;
 8993     __ ble(as_Register($src1$$reg), as_Register($src2$$reg), Lsrc1);
 8994     __ mv(as_Register($dst$$reg), as_Register($src2$$reg));
 8995     __ j(Ldone);
 8996     __ bind(Lsrc1);
 8997     __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
 8998     __ bind(Ldone);
 8999   %}
 9000 
 9001   ins_pipe(pipe_class_compare);
 9002 %}
 9003 
 9004 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
 9005 %{
 9006   match(Set dst (MaxI src1 src2));
 9007 
 9008   effect(DEF dst, USE src1, USE src2);
 9009 
 9010   ins_cost(BRANCH_COST + ALU_COST * 2);
 9011   format %{
 9012     "bge $src1, $src2, Lsrc1\t#@maxI_rReg\n\t"
 9013     "mv $dst, $src2\n\t"
 9014     "j Ldone\n\t"
 9015     "Lsrc1:\n\t"
 9016     "mv $dst, $src1\n\t"
 9017     "Ldone:"
 9018   %}
 9019 
 9020   ins_encode %{
 9021     Label Lsrc1, Ldone;
 9022     __ bge(as_Register($src1$$reg), as_Register($src2$$reg), Lsrc1);
 9023     __ mv(as_Register($dst$$reg), as_Register($src2$$reg));
 9024     __ j(Ldone);
 9025     __ bind(Lsrc1);
 9026     __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
 9027     __ bind(Ldone);
  %}
 9030 
 9031   ins_pipe(pipe_class_compare);
 9032 %}
 9033 
 9034 // ============================================================================
 9035 // Branch Instructions
 9036 // Direct Branch.
 9037 instruct branch(label lbl)
 9038 %{
 9039   match(Goto);
 9040 
 9041   effect(USE lbl);
 9042 
 9043   ins_cost(BRANCH_COST);
 9044   format %{ "j  $lbl\t#@branch" %}
 9045 
 9046   ins_encode(riscv_enc_j(lbl));
 9047 
 9048   ins_pipe(pipe_branch);
 9049 %}
 9050 
 9051 // ============================================================================
 9052 // Compare and Branch Instructions
 9053 
// Patterns for short (near) variants: the branch target must fit the conditional
// branch's 12-bit offset (roughly +/-4KiB)
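//
// Each near rule below is tagged ins_short_branch(1) and shares its match rule
// with a far_* twin, so the branch-shortening pass can substitute the
// single-instruction near form when the final offset fits and keep the far form
// otherwise (this is the usual C2 short-branch pairing; the exact replacement
// policy lives in the shared compiler code, not in this file).  Sketch of a near
// form, with symbolic operands:
//   blt  op1, op2, L          // one B-type branch, target within roughly +/-4KiB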
 9055 
 9056 // Compare flags and branch near instructions.
 9057 instruct cmpFlag_branch(cmpOpEqNe cmp, rFlagsReg cr, label lbl) %{
 9058   match(If cmp cr);
 9059   effect(USE lbl);
 9060 
 9061   ins_cost(BRANCH_COST);
 9062   format %{ "b$cmp  $cr, zr, $lbl\t#@cmpFlag_branch" %}
 9063 
 9064   ins_encode %{
 9065     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label));
 9066   %}
 9067   ins_pipe(pipe_cmpz_branch);
 9068   ins_short_branch(1);
 9069 %}
 9070 
 9071 // Compare signed int and branch near instructions
 9072 instruct cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
 9073 %{
 9074   // Same match rule as `far_cmpI_branch'.
 9075   match(If cmp (CmpI op1 op2));
 9076 
 9077   effect(USE lbl);
 9078 
 9079   ins_cost(BRANCH_COST);
 9080 
 9081   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpI_branch" %}
 9082 
 9083   ins_encode %{
 9084     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9085   %}
 9086 
 9087   ins_pipe(pipe_cmp_branch);
 9088   ins_short_branch(1);
 9089 %}
 9090 
 9091 instruct cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
 9092 %{
 9093   // Same match rule as `far_cmpI_loop'.
 9094   match(CountedLoopEnd cmp (CmpI op1 op2));
 9095 
 9096   effect(USE lbl);
 9097 
 9098   ins_cost(BRANCH_COST);
 9099 
 9100   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpI_loop" %}
 9101 
 9102   ins_encode %{
 9103     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9104   %}
 9105 
 9106   ins_pipe(pipe_cmp_branch);
 9107   ins_short_branch(1);
 9108 %}
 9109 
 9110 // Compare unsigned int and branch near instructions
 9111 instruct cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl)
 9112 %{
 9113   // Same match rule as `far_cmpU_branch'.
 9114   match(If cmp (CmpU op1 op2));
 9115 
 9116   effect(USE lbl);
 9117 
 9118   ins_cost(BRANCH_COST);
 9119 
 9120   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpU_branch" %}
 9121 
 9122   ins_encode %{
 9123     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9124                   as_Register($op2$$reg), *($lbl$$label));
 9125   %}
 9126 
 9127   ins_pipe(pipe_cmp_branch);
 9128   ins_short_branch(1);
 9129 %}
 9130 
 9131 // Compare signed long and branch near instructions
 9132 instruct cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
 9133 %{
 9134   // Same match rule as `far_cmpL_branch'.
 9135   match(If cmp (CmpL op1 op2));
 9136 
 9137   effect(USE lbl);
 9138 
 9139   ins_cost(BRANCH_COST);
 9140 
 9141   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpL_branch" %}
 9142 
 9143   ins_encode %{
 9144     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9145   %}
 9146 
 9147   ins_pipe(pipe_cmp_branch);
 9148   ins_short_branch(1);
 9149 %}
 9150 
 9151 instruct cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
 9152 %{
 9153   // Same match rule as `far_cmpL_loop'.
 9154   match(CountedLoopEnd cmp (CmpL op1 op2));
 9155 
 9156   effect(USE lbl);
 9157 
 9158   ins_cost(BRANCH_COST);
 9159 
 9160   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpL_loop" %}
 9161 
 9162   ins_encode %{
 9163     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9164   %}
 9165 
 9166   ins_pipe(pipe_cmp_branch);
 9167   ins_short_branch(1);
 9168 %}
 9169 
 9170 // Compare unsigned long and branch near instructions
 9171 instruct cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl)
 9172 %{
 9173   // Same match rule as `far_cmpUL_branch'.
 9174   match(If cmp (CmpUL op1 op2));
 9175 
 9176   effect(USE lbl);
 9177 
 9178   ins_cost(BRANCH_COST);
 9179   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpUL_branch" %}
 9180 
 9181   ins_encode %{
 9182     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9183                   as_Register($op2$$reg), *($lbl$$label));
 9184   %}
 9185 
 9186   ins_pipe(pipe_cmp_branch);
 9187   ins_short_branch(1);
 9188 %}
 9189 
 9190 // Compare pointer and branch near instructions
 9191 instruct cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
 9192 %{
 9193   // Same match rule as `far_cmpP_branch'.
 9194   match(If cmp (CmpP op1 op2));
 9195 
 9196   effect(USE lbl);
 9197 
 9198   ins_cost(BRANCH_COST);
 9199 
 9200   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpP_branch" %}
 9201 
 9202   ins_encode %{
 9203     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9204                   as_Register($op2$$reg), *($lbl$$label));
 9205   %}
 9206 
 9207   ins_pipe(pipe_cmp_branch);
 9208   ins_short_branch(1);
 9209 %}
 9210 
 9211 // Compare narrow pointer and branch near instructions
 9212 instruct cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
 9213 %{
 9214   // Same match rule as `far_cmpN_branch'.
 9215   match(If cmp (CmpN op1 op2));
 9216 
 9217   effect(USE lbl);
 9218 
 9219   ins_cost(BRANCH_COST);
 9220 
 9221   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpN_branch" %}
 9222 
 9223   ins_encode %{
 9224     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9225                   as_Register($op2$$reg), *($lbl$$label));
 9226   %}
 9227 
 9228   ins_pipe(pipe_cmp_branch);
 9229   ins_short_branch(1);
 9230 %}
 9231 
 9232 // Compare float and branch near instructions
 9233 instruct cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
 9234 %{
 9235   // Same match rule as `far_cmpF_branch'.
 9236   match(If cmp (CmpF op1 op2));
 9237 
 9238   effect(USE lbl);
 9239 
 9240   ins_cost(XFER_COST + BRANCH_COST);
 9241   format %{ "float_b$cmp $op1, $op2, $lbl \t#@cmpF_branch"%}
 9242 
 9243   ins_encode %{
 9244     __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), *($lbl$$label));
 9245   %}
 9246 
 9247   ins_pipe(pipe_class_compare);
 9248   ins_short_branch(1);
 9249 %}
 9250 
 9251 // Compare double and branch near instructions
 9252 instruct cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
 9253 %{
 9254   // Same match rule as `far_cmpD_branch'.
 9255   match(If cmp (CmpD op1 op2));
 9256   effect(USE lbl);
 9257 
 9258   ins_cost(XFER_COST + BRANCH_COST);
 9259   format %{ "double_b$cmp $op1, $op2, $lbl\t#@cmpD_branch"%}
 9260 
 9261   ins_encode %{
 9262     __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
 9263                         as_FloatRegister($op2$$reg), *($lbl$$label));
 9264   %}
 9265 
 9266   ins_pipe(pipe_class_compare);
 9267   ins_short_branch(1);
 9268 %}
 9269 
 9270 // Compare signed int with zero and branch near instructions
 9271 instruct cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9272 %{
 9273   // Same match rule as `far_cmpI_reg_imm0_branch'.
 9274   match(If cmp (CmpI op1 zero));
 9275 
 9276   effect(USE op1, USE lbl);
 9277 
 9278   ins_cost(BRANCH_COST);
 9279   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpI_reg_imm0_branch" %}
 9280 
 9281   ins_encode %{
 9282     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9283   %}
 9284 
 9285   ins_pipe(pipe_cmpz_branch);
 9286   ins_short_branch(1);
 9287 %}
 9288 
 9289 instruct cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9290 %{
 9291   // Same match rule as `far_cmpI_reg_imm0_loop'.
 9292   match(CountedLoopEnd cmp (CmpI op1 zero));
 9293 
 9294   effect(USE op1, USE lbl);
 9295 
 9296   ins_cost(BRANCH_COST);
 9297 
 9298   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpI_reg_imm0_loop" %}
 9299 
 9300   ins_encode %{
 9301     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9302   %}
 9303 
 9304   ins_pipe(pipe_cmpz_branch);
 9305   ins_short_branch(1);
 9306 %}
 9307 
 9308 // Compare unsigned int with zero and branch near instructions
 9309 instruct cmpUEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
 9310 %{
 9311   // Same match rule as `far_cmpUEqNeLeGt_reg_imm0_branch'.
 9312   match(If cmp (CmpU op1 zero));
 9313 
 9314   effect(USE op1, USE lbl);
 9315 
 9316   ins_cost(BRANCH_COST);
 9317 
 9318   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpUEqNeLeGt_reg_imm0_branch" %}
 9319 
 9320   ins_encode %{
 9321     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9322   %}
 9323 
 9324   ins_pipe(pipe_cmpz_branch);
 9325   ins_short_branch(1);
 9326 %}
 9327 
 9328 // Compare signed long with zero and branch near instructions
 9329 instruct cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9330 %{
 9331   // Same match rule as `far_cmpL_reg_imm0_branch'.
 9332   match(If cmp (CmpL op1 zero));
 9333 
 9334   effect(USE op1, USE lbl);
 9335 
 9336   ins_cost(BRANCH_COST);
 9337 
 9338   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpL_reg_imm0_branch" %}
 9339 
 9340   ins_encode %{
 9341     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9342   %}
 9343 
 9344   ins_pipe(pipe_cmpz_branch);
 9345   ins_short_branch(1);
 9346 %}
 9347 
 9348 instruct cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9349 %{
 9350   // Same match rule as `far_cmpL_reg_imm0_loop'.
 9351   match(CountedLoopEnd cmp (CmpL op1 zero));
 9352 
 9353   effect(USE op1, USE lbl);
 9354 
 9355   ins_cost(BRANCH_COST);
 9356 
 9357   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpL_reg_imm0_loop" %}
 9358 
 9359   ins_encode %{
 9360     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9361   %}
 9362 
 9363   ins_pipe(pipe_cmpz_branch);
 9364   ins_short_branch(1);
 9365 %}
 9366 
 9367 // Compare unsigned long with zero and branch near instructions
 9368 instruct cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
 9369 %{
 9370   // Same match rule as `far_cmpULEqNeLeGt_reg_imm0_branch'.
 9371   match(If cmp (CmpUL op1 zero));
 9372 
 9373   effect(USE op1, USE lbl);
 9374 
 9375   ins_cost(BRANCH_COST);
 9376 
 9377   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpULEqNeLeGt_reg_imm0_branch" %}
 9378 
 9379   ins_encode %{
 9380     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9381   %}
 9382 
 9383   ins_pipe(pipe_cmpz_branch);
 9384   ins_short_branch(1);
 9385 %}
 9386 
 9387 // Compare pointer with zero and branch near instructions
 9388 instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
 9389   // Same match rule as `far_cmpP_reg_imm0_branch'.
 9390   match(If cmp (CmpP op1 zero));
 9391   effect(USE lbl);
 9392 
 9393   ins_cost(BRANCH_COST);
 9394   format %{ "b$cmp   $op1, zr, $lbl\t#@cmpP_imm0_branch" %}
 9395 
 9396   ins_encode %{
 9397     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9398   %}
 9399 
 9400   ins_pipe(pipe_cmpz_branch);
 9401   ins_short_branch(1);
 9402 %}
 9403 
 9404 // Compare narrow pointer with zero and branch near instructions
 9405 instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
 9406   // Same match rule as `far_cmpN_reg_imm0_branch'.
 9407   match(If cmp (CmpN op1 zero));
 9408   effect(USE lbl);
 9409 
 9410   ins_cost(BRANCH_COST);
 9411 
 9412   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpN_imm0_branch" %}
 9413 
 9414   ins_encode %{
 9415     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9416   %}
 9417 
 9418   ins_pipe(pipe_cmpz_branch);
 9419   ins_short_branch(1);
 9420 %}
 9421 
 9422 // Compare narrow pointer with pointer zero and branch near instructions
 9423 instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
 9424   // Same match rule as `far_cmpP_narrowOop_imm0_branch'.
 9425   match(If cmp (CmpP (DecodeN op1) zero));
 9426   effect(USE lbl);
 9427 
 9428   ins_cost(BRANCH_COST);
 9429   format %{ "b$cmp   $op1, zr, $lbl\t#@cmpP_narrowOop_imm0_branch" %}
 9430 
 9431   ins_encode %{
 9432     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9433   %}
 9434 
 9435   ins_pipe(pipe_cmpz_branch);
 9436   ins_short_branch(1);
 9437 %}
 9438 
// Patterns for far variants: an inverted near branch over an unconditional jump,
// extending the reach to the 20-bit jal offset (roughly +/-1MiB)
 9440 
 9441 instruct far_cmpFlag_branch(cmpOp cmp, rFlagsReg cr, label lbl) %{
 9442   match(If cmp cr);
 9443   effect(USE lbl);
 9444 
 9445   ins_cost(BRANCH_COST);
 9446   format %{ "far_b$cmp $cr, zr, $lbl\t#@far_cmpFlag_branch"%}
 9447 
 9448   ins_encode %{
 9449     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label), /* is_far */ true);
 9450   %}
 9451 
 9452   ins_pipe(pipe_cmpz_branch);
 9453 %}
 9454 
 9455 // Compare signed int and branch far instructions
 9456 instruct far_cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
 9457   match(If cmp (CmpI op1 op2));
 9458   effect(USE lbl);
 9459 
 9460   ins_cost(BRANCH_COST * 2);
 9461 
  // The format pseudo-instruction [far_b$cmp] here is emitted as two instructions
  // in the macroassembler: b$not_cmp(op1, op2, done), j($lbl), bind(done)
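  // For example (illustrative), `far_bge op1, op2, L` is emitted as:
  //   blt   op1, op2, done    // inverted condition, short-range branch
  //   j     L                 // unconditional jump, roughly +/-1MiB of reach
  //   done: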
 9464   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpI_branch" %}
 9465 
 9466   ins_encode %{
 9467     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9468   %}
 9469 
 9470   ins_pipe(pipe_cmp_branch);
 9471 %}
 9472 
 9473 instruct far_cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
 9474   match(CountedLoopEnd cmp (CmpI op1 op2));
 9475   effect(USE lbl);
 9476 
 9477   ins_cost(BRANCH_COST * 2);
 9478   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpI_loop" %}
 9479 
 9480   ins_encode %{
 9481     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9482   %}
 9483 
 9484   ins_pipe(pipe_cmp_branch);
 9485 %}
 9486 
 9487 instruct far_cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) %{
 9488   match(If cmp (CmpU op1 op2));
 9489   effect(USE lbl);
 9490 
 9491   ins_cost(BRANCH_COST * 2);
 9492   format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpU_branch" %}
 9493 
 9494   ins_encode %{
 9495     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9496                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9497   %}
 9498 
 9499   ins_pipe(pipe_cmp_branch);
 9500 %}
 9501 
 9502 instruct far_cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
 9503   match(If cmp (CmpL op1 op2));
 9504   effect(USE lbl);
 9505 
 9506   ins_cost(BRANCH_COST * 2);
 9507   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpL_branch" %}
 9508 
 9509   ins_encode %{
 9510     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9511   %}
 9512 
 9513   ins_pipe(pipe_cmp_branch);
 9514 %}
 9515 
instruct far_cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
 9517   match(CountedLoopEnd cmp (CmpL op1 op2));
 9518   effect(USE lbl);
 9519 
 9520   ins_cost(BRANCH_COST * 2);
 9521   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpL_loop" %}
 9522 
 9523   ins_encode %{
 9524     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9525   %}
 9526 
 9527   ins_pipe(pipe_cmp_branch);
 9528 %}
 9529 
 9530 instruct far_cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) %{
 9531   match(If cmp (CmpUL op1 op2));
 9532   effect(USE lbl);
 9533 
 9534   ins_cost(BRANCH_COST * 2);
 9535   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpUL_branch" %}
 9536 
 9537   ins_encode %{
 9538     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9539                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9540   %}
 9541 
 9542   ins_pipe(pipe_cmp_branch);
 9543 %}
 9544 
 9545 instruct far_cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
 9546 %{
 9547   match(If cmp (CmpP op1 op2));
 9548 
 9549   effect(USE lbl);
 9550 
 9551   ins_cost(BRANCH_COST * 2);
 9552 
 9553   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpP_branch" %}
 9554 
 9555   ins_encode %{
 9556     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9557                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9558   %}
 9559 
 9560   ins_pipe(pipe_cmp_branch);
 9561 %}
 9562 
 9563 instruct far_cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
 9564 %{
 9565   match(If cmp (CmpN op1 op2));
 9566 
 9567   effect(USE lbl);
 9568 
 9569   ins_cost(BRANCH_COST * 2);
 9570 
 9571   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpN_branch" %}
 9572 
 9573   ins_encode %{
 9574     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9575                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9576   %}
 9577 
 9578   ins_pipe(pipe_cmp_branch);
 9579 %}
 9580 
 9581 // Float compare and branch instructions
 9582 instruct far_cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
 9583 %{
 9584   match(If cmp (CmpF op1 op2));
 9585 
 9586   effect(USE lbl);
 9587 
 9588   ins_cost(XFER_COST + BRANCH_COST * 2);
 9589   format %{ "far_float_b$cmp $op1, $op2, $lbl\t#@far_cmpF_branch"%}
 9590 
 9591   ins_encode %{
 9592     __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
 9593                         *($lbl$$label), /* is_far */ true);
 9594   %}
 9595 
 9596   ins_pipe(pipe_class_compare);
 9597 %}
 9598 
 9599 // Double compare and branch instructions
 9600 instruct far_cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
 9601 %{
 9602   match(If cmp (CmpD op1 op2));
 9603   effect(USE lbl);
 9604 
 9605   ins_cost(XFER_COST + BRANCH_COST * 2);
 9606   format %{ "far_double_b$cmp $op1, $op2, $lbl\t#@far_cmpD_branch"%}
 9607 
 9608   ins_encode %{
 9609     __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
 9610                         as_FloatRegister($op2$$reg), *($lbl$$label), /* is_far */ true);
 9611   %}
 9612 
 9613   ins_pipe(pipe_class_compare);
 9614 %}
 9615 
 9616 instruct far_cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9617 %{
 9618   match(If cmp (CmpI op1 zero));
 9619 
 9620   effect(USE op1, USE lbl);
 9621 
 9622   ins_cost(BRANCH_COST * 2);
 9623 
 9624   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpI_reg_imm0_branch" %}
 9625 
 9626   ins_encode %{
 9627     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9628   %}
 9629 
 9630   ins_pipe(pipe_cmpz_branch);
 9631 %}
 9632 
 9633 instruct far_cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9634 %{
 9635   match(CountedLoopEnd cmp (CmpI op1 zero));
 9636 
 9637   effect(USE op1, USE lbl);
 9638 
 9639   ins_cost(BRANCH_COST * 2);
 9640 
 9641   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpI_reg_imm0_loop" %}
 9642 
 9643   ins_encode %{
 9644     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9645   %}
 9646 
 9647   ins_pipe(pipe_cmpz_branch);
 9648 %}
 9649 
 9650 instruct far_cmpUEqNeLeGt_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
 9651 %{
 9652   match(If cmp (CmpU op1 zero));
 9653 
 9654   effect(USE op1, USE lbl);
 9655 
 9656   ins_cost(BRANCH_COST * 2);
 9657 
 9658   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpUEqNeLeGt_imm0_branch" %}
 9659 
 9660   ins_encode %{
 9661     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9662   %}
 9663 
 9664   ins_pipe(pipe_cmpz_branch);
 9665 %}
 9666 
// Compare lt/ge unsigned against zero has no short instruct with the same match rule.
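// For unsigned values `u < 0` is never true and `u >= 0` is always true, so (as
// the format string suggests) the encoding below degenerates to either no code
// at all (lt) or a single unconditional `j $lbl` (ge); this is an illustrative
// reading, the authoritative behaviour is riscv_enc_far_cmpULtGe_imm0_branch.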
 9668 instruct far_cmpULtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegI op1, immI0 zero, label lbl)
 9669 %{
 9670   match(If cmp (CmpU op1 zero));
 9671 
 9672   effect(USE op1, USE lbl);
 9673 
 9674   ins_cost(BRANCH_COST);
 9675 
 9676   format %{ "j  $lbl if $cmp == ge\t#@far_cmpULtGe_reg_imm0_branch" %}
 9677 
 9678   ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
 9679 
 9680   ins_pipe(pipe_cmpz_branch);
 9681 %}
 9682 
 9683 instruct far_cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9684 %{
 9685   match(If cmp (CmpL op1 zero));
 9686 
 9687   effect(USE op1, USE lbl);
 9688 
 9689   ins_cost(BRANCH_COST * 2);
 9690 
 9691   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpL_reg_imm0_branch" %}
 9692 
 9693   ins_encode %{
 9694     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9695   %}
 9696 
 9697   ins_pipe(pipe_cmpz_branch);
 9698 %}
 9699 
 9700 instruct far_cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9701 %{
 9702   match(CountedLoopEnd cmp (CmpL op1 zero));
 9703 
 9704   effect(USE op1, USE lbl);
 9705 
 9706   ins_cost(BRANCH_COST * 2);
 9707 
 9708   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpL_reg_imm0_loop" %}
 9709 
 9710   ins_encode %{
 9711     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9712   %}
 9713 
 9714   ins_pipe(pipe_cmpz_branch);
 9715 %}
 9716 
 9717 instruct far_cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
 9718 %{
 9719   match(If cmp (CmpUL op1 zero));
 9720 
 9721   effect(USE op1, USE lbl);
 9722 
 9723   ins_cost(BRANCH_COST * 2);
 9724 
 9725   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpULEqNeLeGt_reg_imm0_branch" %}
 9726 
 9727   ins_encode %{
 9728     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9729   %}
 9730 
 9731   ins_pipe(pipe_cmpz_branch);
 9732 %}
 9733 
// Compare lt/ge unsigned against zero has no short instruct with the same match
// rule (see the note above the 32-bit variant).
 9735 instruct far_cmpULLtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegL op1, immL0 zero, label lbl)
 9736 %{
 9737   match(If cmp (CmpUL op1 zero));
 9738 
 9739   effect(USE op1, USE lbl);
 9740 
 9741   ins_cost(BRANCH_COST);
 9742 
 9743   format %{ "j  $lbl if $cmp == ge\t#@far_cmpULLtGe_reg_imm0_branch" %}
 9744 
 9745   ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
 9746 
 9747   ins_pipe(pipe_cmpz_branch);
 9748 %}
 9749 
 9750 instruct far_cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
 9751   match(If cmp (CmpP op1 zero));
 9752   effect(USE lbl);
 9753 
 9754   ins_cost(BRANCH_COST * 2);
 9755   format %{ "far_b$cmp   $op1, zr, $lbl\t#@far_cmpP_imm0_branch" %}
 9756 
 9757   ins_encode %{
 9758     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9759   %}
 9760 
 9761   ins_pipe(pipe_cmpz_branch);
 9762 %}
 9763 
 9764 instruct far_cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
 9765   match(If cmp (CmpN op1 zero));
 9766   effect(USE lbl);
 9767 
 9768   ins_cost(BRANCH_COST * 2);
 9769 
 9770   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpN_imm0_branch" %}
 9771 
 9772   ins_encode %{
 9773     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9774   %}
 9775 
 9776   ins_pipe(pipe_cmpz_branch);
 9777 %}
 9778 
 9779 instruct far_cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
 9780   match(If cmp (CmpP (DecodeN op1) zero));
 9781   effect(USE lbl);
 9782 
 9783   ins_cost(BRANCH_COST * 2);
 9784   format %{ "far_b$cmp   $op1, zr, $lbl\t#@far_cmpP_narrowOop_imm0_branch" %}
 9785 
 9786   ins_encode %{
 9787     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9788   %}
 9789 
 9790   ins_pipe(pipe_cmpz_branch);
 9791 %}
 9792 
 9793 // ============================================================================
 9794 // Conditional Move Instructions
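//
// The base ISA has no conditional-move instruction, so enc_cmove is expected to
// expand to a short branch over a register move; roughly (an illustrative sketch,
// not the authoritative implementation, built only from calls that already appear
// in this file):
//   Label no_set;
//   __ bge(op1, op2, no_set);   // skip the move on the negated condition (here: !lt)
//   __ mv(dst, src);
//   __ bind(no_set);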
 9795 instruct cmovI_cmpI(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOp cop) %{
 9796   match(Set dst (CMoveI (Binary cop (CmpI op1 op2)) (Binary dst src)));
 9797   ins_cost(ALU_COST + BRANCH_COST);
 9798 
 9799   format %{
 9800     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpI\n\t"
 9801   %}
 9802 
 9803   ins_encode %{
 9804     __ enc_cmove($cop$$cmpcode,
 9805                  as_Register($op1$$reg), as_Register($op2$$reg),
 9806                  as_Register($dst$$reg), as_Register($src$$reg));
 9807   %}
 9808 
 9809   ins_pipe(pipe_class_compare);
 9810 %}
 9811 
 9812 instruct cmovI_cmpU(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOpU cop) %{
 9813   match(Set dst (CMoveI (Binary cop (CmpU op1 op2)) (Binary dst src)));
 9814   ins_cost(ALU_COST + BRANCH_COST);
 9815 
 9816   format %{
 9817     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpU\n\t"
 9818   %}
 9819 
 9820   ins_encode %{
 9821     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9822                  as_Register($op1$$reg), as_Register($op2$$reg),
 9823                  as_Register($dst$$reg), as_Register($src$$reg));
 9824   %}
 9825 
 9826   ins_pipe(pipe_class_compare);
 9827 %}
 9828 
 9829 instruct cmovI_cmpL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOp cop) %{
 9830   match(Set dst (CMoveI (Binary cop (CmpL op1 op2)) (Binary dst src)));
 9831   ins_cost(ALU_COST + BRANCH_COST);
 9832 
 9833   format %{
 9834     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpL\n\t"
 9835   %}
 9836 
 9837   ins_encode %{
 9838     __ enc_cmove($cop$$cmpcode,
 9839                  as_Register($op1$$reg), as_Register($op2$$reg),
 9840                  as_Register($dst$$reg), as_Register($src$$reg));
 9841   %}
 9842 
 9843   ins_pipe(pipe_class_compare);
 9844 %}
 9845 
 9846 instruct cmovI_cmpUL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOpU cop) %{
 9847   match(Set dst (CMoveI (Binary cop (CmpUL op1 op2)) (Binary dst src)));
 9848   ins_cost(ALU_COST + BRANCH_COST);
 9849 
 9850   format %{
 9851     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpUL\n\t"
 9852   %}
 9853 
 9854   ins_encode %{
 9855     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9856                  as_Register($op1$$reg), as_Register($op2$$reg),
 9857                  as_Register($dst$$reg), as_Register($src$$reg));
 9858   %}
 9859 
 9860   ins_pipe(pipe_class_compare);
 9861 %}
 9862 
 9863 instruct cmovI_cmpN(iRegINoSp dst, iRegI src, iRegN op1, iRegN op2, cmpOpU cop) %{
 9864   match(Set dst (CMoveI (Binary cop (CmpN op1 op2)) (Binary dst src)));
 9865   ins_cost(ALU_COST + BRANCH_COST);
 9866 
 9867   format %{
 9868     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpN\n\t"
 9869   %}
 9870 
 9871   ins_encode %{
 9872     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9873                  as_Register($op1$$reg), as_Register($op2$$reg),
 9874                  as_Register($dst$$reg), as_Register($src$$reg));
 9875   %}
 9876 
 9877   ins_pipe(pipe_class_compare);
 9878 %}
 9879 
 9880 instruct cmovI_cmpP(iRegINoSp dst, iRegI src, iRegP op1, iRegP op2, cmpOpU cop) %{
 9881   match(Set dst (CMoveI (Binary cop (CmpP op1 op2)) (Binary dst src)));
 9882   ins_cost(ALU_COST + BRANCH_COST);
 9883 
 9884   format %{
 9885     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpP\n\t"
 9886   %}
 9887 
 9888   ins_encode %{
 9889     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9890                  as_Register($op1$$reg), as_Register($op2$$reg),
 9891                  as_Register($dst$$reg), as_Register($src$$reg));
 9892   %}
 9893 
 9894   ins_pipe(pipe_class_compare);
 9895 %}
 9896 
 9897 instruct cmovL_cmpL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOp cop) %{
 9898   match(Set dst (CMoveL (Binary cop (CmpL op1 op2)) (Binary dst src)));
 9899   ins_cost(ALU_COST + BRANCH_COST);
 9900 
 9901   format %{
 9902     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpL\n\t"
 9903   %}
 9904 
 9905   ins_encode %{
 9906     __ enc_cmove($cop$$cmpcode,
 9907                  as_Register($op1$$reg), as_Register($op2$$reg),
 9908                  as_Register($dst$$reg), as_Register($src$$reg));
 9909   %}
 9910 
 9911   ins_pipe(pipe_class_compare);
 9912 %}
 9913 
 9914 instruct cmovL_cmpUL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOpU cop) %{
 9915   match(Set dst (CMoveL (Binary cop (CmpUL op1 op2)) (Binary dst src)));
 9916   ins_cost(ALU_COST + BRANCH_COST);
 9917 
 9918   format %{
 9919     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpUL\n\t"
 9920   %}
 9921 
 9922   ins_encode %{
 9923     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9924                  as_Register($op1$$reg), as_Register($op2$$reg),
 9925                  as_Register($dst$$reg), as_Register($src$$reg));
 9926   %}
 9927 
 9928   ins_pipe(pipe_class_compare);
 9929 %}
 9930 
 9931 instruct cmovL_cmpI(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOp cop) %{
 9932   match(Set dst (CMoveL (Binary cop (CmpI op1 op2)) (Binary dst src)));
 9933   ins_cost(ALU_COST + BRANCH_COST);
 9934 
 9935   format %{
 9936     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpI\n\t"
 9937   %}
 9938 
 9939   ins_encode %{
 9940     __ enc_cmove($cop$$cmpcode,
 9941                  as_Register($op1$$reg), as_Register($op2$$reg),
 9942                  as_Register($dst$$reg), as_Register($src$$reg));
 9943   %}
 9944 
 9945   ins_pipe(pipe_class_compare);
 9946 %}
 9947 
 9948 instruct cmovL_cmpU(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOpU cop) %{
 9949   match(Set dst (CMoveL (Binary cop (CmpU op1 op2)) (Binary dst src)));
 9950   ins_cost(ALU_COST + BRANCH_COST);
 9951 
 9952   format %{
 9953     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpU\n\t"
 9954   %}
 9955 
 9956   ins_encode %{
 9957     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9958                  as_Register($op1$$reg), as_Register($op2$$reg),
 9959                  as_Register($dst$$reg), as_Register($src$$reg));
 9960   %}
 9961 
 9962   ins_pipe(pipe_class_compare);
 9963 %}
 9964 
 9965 instruct cmovL_cmpN(iRegLNoSp dst, iRegL src, iRegN op1, iRegN op2, cmpOpU cop) %{
 9966   match(Set dst (CMoveL (Binary cop (CmpN op1 op2)) (Binary dst src)));
 9967   ins_cost(ALU_COST + BRANCH_COST);
 9968 
 9969   format %{
 9970     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpN\n\t"
 9971   %}
 9972 
 9973   ins_encode %{
 9974     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9975                  as_Register($op1$$reg), as_Register($op2$$reg),
 9976                  as_Register($dst$$reg), as_Register($src$$reg));
 9977   %}
 9978 
 9979   ins_pipe(pipe_class_compare);
 9980 %}
 9981 
 9982 instruct cmovL_cmpP(iRegLNoSp dst, iRegL src, iRegP op1, iRegP op2, cmpOpU cop) %{
 9983   match(Set dst (CMoveL (Binary cop (CmpP op1 op2)) (Binary dst src)));
 9984   ins_cost(ALU_COST + BRANCH_COST);
 9985 
 9986   format %{
 9987     "CMove $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpP\n\t"
 9988   %}
 9989 
 9990   ins_encode %{
 9991     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9992                  as_Register($op1$$reg), as_Register($op2$$reg),
 9993                  as_Register($dst$$reg), as_Register($src$$reg));
 9994   %}
 9995 
 9996   ins_pipe(pipe_class_compare);
 9997 %}
 9998 
 9999 // ============================================================================
10000 // Procedure Call/Return Instructions
10001 
10002 // Call Java Static Instruction
10003 // Note: If this code changes, the corresponding ret_addr_offset() and
10004 //       compute_padding() functions will have to be adjusted.
10005 instruct CallStaticJavaDirect(method meth)
10006 %{
10007   match(CallStaticJava);
10008 
10009   effect(USE meth);
10010 
10011   ins_cost(BRANCH_COST);
10012 
10013   format %{ "CALL,static $meth\t#@CallStaticJavaDirect" %}
10014 
10015   ins_encode(riscv_enc_java_static_call(meth),
10016              riscv_enc_call_epilog);
10017 
10018   ins_pipe(pipe_class_call);
10019   ins_alignment(4);
10020 %}
10021 
10023 
10024 // Call Java Dynamic Instruction
10025 // Note: If this code changes, the corresponding ret_addr_offset() and
10026 //       compute_padding() functions will have to be adjusted.
10027 instruct CallDynamicJavaDirect(method meth, rFlagsReg cr)
10028 %{
10029   match(CallDynamicJava);
10030 
10031   effect(USE meth, KILL cr);
10032 
10033   ins_cost(BRANCH_COST + ALU_COST * 5);
10034 
10035   format %{ "CALL,dynamic $meth\t#@CallDynamicJavaDirect" %}
10036 
10037   ins_encode(riscv_enc_java_dynamic_call(meth),
10038              riscv_enc_call_epilog);
10039 
10040   ins_pipe(pipe_class_call);
10041   ins_alignment(4);
10042 %}
10043 
10044 // Call Runtime Instruction
10045 
10046 instruct CallRuntimeDirect(method meth, rFlagsReg cr)
10047 %{
10048   match(CallRuntime);
10049 
10050   effect(USE meth, KILL cr);
10051 
10052   ins_cost(BRANCH_COST);
10053 
10054   format %{ "CALL, runtime $meth\t#@CallRuntimeDirect" %}
10055 
10056   ins_encode(riscv_enc_java_to_runtime(meth));
10057 
10058   ins_pipe(pipe_class_call);
10059 %}
10060 
// Call Runtime Leaf Instruction
10062 
10063 instruct CallLeafDirect(method meth, rFlagsReg cr)
10064 %{
10065   match(CallLeaf);
10066 
10067   effect(USE meth, KILL cr);
10068 
10069   ins_cost(BRANCH_COST);
10070 
10071   format %{ "CALL, runtime leaf $meth\t#@CallLeafDirect" %}
10072 
10073   ins_encode(riscv_enc_java_to_runtime(meth));
10074 
10075   ins_pipe(pipe_class_call);
10076 %}
10077 
// Call Runtime Leaf (no FP) Instruction
10079 
10080 instruct CallLeafNoFPDirect(method meth, rFlagsReg cr)
10081 %{
10082   match(CallLeafNoFP);
10083 
10084   effect(USE meth, KILL cr);
10085 
10086   ins_cost(BRANCH_COST);
10087 
10088   format %{ "CALL, runtime leaf nofp $meth\t#@CallLeafNoFPDirect" %}
10089 
10090   ins_encode(riscv_enc_java_to_runtime(meth));
10091 
10092   ins_pipe(pipe_class_call);
10093 %}
10094 
10095 // ============================================================================
10096 // Partial Subtype Check
10097 //
// Search the subklass's secondary supers array for the superklass.  Set a
// hidden internal cache on a hit (the cache is checked with exposed code in
// gen_subtype_check()).  Return zero for a hit.  The encoding
// ALSO sets flags.
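//
// Illustrative flow (a sketch, not the exact stub code): load the subklass's
// _secondary_supers array, scan it for the superklass; on a hit record the
// superklass in the subklass's _secondary_super_cache and return 0 in $result,
// otherwise return a non-zero value.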
10102 
10103 instruct partialSubtypeCheck(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp, rFlagsReg cr)
10104 %{
10105   match(Set result (PartialSubtypeCheck sub super));
10106   effect(KILL tmp, KILL cr);
10107 
10108   ins_cost(11 * DEFAULT_COST);
10109   format %{ "partialSubtypeCheck $result, $sub, $super\t#@partialSubtypeCheck" %}
10110 
10111   ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result));
10112 
10113   opcode(0x1); // Force zero of result reg on hit
10114 
10115   ins_pipe(pipe_class_memory);
10116 %}
10117 
10118 instruct partialSubtypeCheckConstSuper(iRegP_R14 sub, iRegP_R10 super_reg, immP super_con, iRegP_R15 result,
10119                                        iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13, iRegP_R16 tmpR16)
10120 %{
10121   predicate(UseSecondarySupersTable);
10122   match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
10123   effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16);
10124 
10125   ins_cost(7 * DEFAULT_COST); // needs to be less than competing nodes
10126   format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}
10127 
10128   ins_encode %{
10129     bool success = false;
10130     u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
10131     if (InlineSecondarySupersTest) {
10132       success = __ lookup_secondary_supers_table($sub$$Register, $super_reg$$Register, $result$$Register,
10133                                                  $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
10134                                                  $tmpR16$$Register, super_klass_slot);
10135     } else {
10136       address call = __ reloc_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
10137       success = (call != nullptr);
10138     }
10139     if (!success) {
10140       ciEnv::current()->record_failure("CodeCache is full");
10141       return;
10142     }
10143   %}
10144 
10145   ins_pipe(pipe_class_memory);
10146 %}
10147 
10148 instruct partialSubtypeCheckVsZero(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp,
10149                                    immP0 zero, rFlagsReg cr)
10150 %{
10151   match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
10152   effect(KILL tmp, KILL result);
10153 
10154   ins_cost(11 * DEFAULT_COST);
10155   format %{ "partialSubtypeCheck $result, $sub, $super == 0\t#@partialSubtypeCheckVsZero" %}
10156 
10157   ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result));
10158 
10159   opcode(0x0); // Don't zero result reg on hit
10160 
10161   ins_pipe(pipe_class_memory);
10162 %}
10163 
10164 instruct string_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10165                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10166 %{
10167   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU);
10168   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10169   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10170 
10171   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %}
10172   ins_encode %{
10173     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
10174     __ string_compare($str1$$Register, $str2$$Register,
10175                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10176                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10177                       StrIntrinsicNode::UU);
10178   %}
10179   ins_pipe(pipe_class_memory);
10180 %}
10181 
10182 instruct string_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10183                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10184 %{
10185   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL);
10186   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10187   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10188 
10189   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %}
10190   ins_encode %{
10191     __ string_compare($str1$$Register, $str2$$Register,
10192                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10193                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10194                       StrIntrinsicNode::LL);
10195   %}
10196   ins_pipe(pipe_class_memory);
10197 %}
10198 
10199 instruct string_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10200                           iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10201 %{
10202   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL);
10203   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10204   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10205 
  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %}
10207   ins_encode %{
10208     __ string_compare($str1$$Register, $str2$$Register,
10209                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10210                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10211                       StrIntrinsicNode::UL);
10212   %}
10213   ins_pipe(pipe_class_memory);
10214 %}
10215 
10216 instruct string_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10217                           iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3,
10218                           rFlagsReg cr)
10219 %{
10220   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU);
10221   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10222   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10223 
10224   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %}
10225   ins_encode %{
10226     __ string_compare($str1$$Register, $str2$$Register,
10227                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10228                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10229                       StrIntrinsicNode::LU);
10230   %}
10231   ins_pipe(pipe_class_memory);
10232 %}
10233 
10234 instruct string_indexofUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10235                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10236                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10237 %{
10238   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
10239   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10240   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
10241          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
10242 
10243   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}
10244   ins_encode %{
10245     __ string_indexof($str1$$Register, $str2$$Register,
10246                       $cnt1$$Register, $cnt2$$Register,
10247                       $tmp1$$Register, $tmp2$$Register,
10248                       $tmp3$$Register, $tmp4$$Register,
10249                       $tmp5$$Register, $tmp6$$Register,
10250                       $result$$Register, StrIntrinsicNode::UU);
10251   %}
10252   ins_pipe(pipe_class_memory);
10253 %}
10254 
10255 instruct string_indexofLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10256                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10257                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10258 %{
10259   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
10260   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10261   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
10262          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
10263 
10264   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}
10265   ins_encode %{
10266     __ string_indexof($str1$$Register, $str2$$Register,
10267                       $cnt1$$Register, $cnt2$$Register,
10268                       $tmp1$$Register, $tmp2$$Register,
10269                       $tmp3$$Register, $tmp4$$Register,
10270                       $tmp5$$Register, $tmp6$$Register,
10271                       $result$$Register, StrIntrinsicNode::LL);
10272   %}
10273   ins_pipe(pipe_class_memory);
10274 %}
10275 
10276 instruct string_indexofUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10277                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10278                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10279 %{
10280   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
10281   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10282   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
10283          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}
10286   ins_encode %{
10287     __ string_indexof($str1$$Register, $str2$$Register,
10288                       $cnt1$$Register, $cnt2$$Register,
10289                       $tmp1$$Register, $tmp2$$Register,
10290                       $tmp3$$Register, $tmp4$$Register,
10291                       $tmp5$$Register, $tmp6$$Register,
10292                       $result$$Register, StrIntrinsicNode::UL);
10293   %}
10294   ins_pipe(pipe_class_memory);
10295 %}
10296 
10297 instruct string_indexof_conUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10298                               immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10299                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10300 %{
10301   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
10302   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10303   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10304          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10305 
10306   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}
10307 
10308   ins_encode %{
10309     int icnt2 = (int)$int_cnt2$$constant;
10310     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10311                                  $cnt1$$Register, zr,
10312                                  $tmp1$$Register, $tmp2$$Register,
10313                                  $tmp3$$Register, $tmp4$$Register,
10314                                  icnt2, $result$$Register, StrIntrinsicNode::UU);
10315   %}
10316   ins_pipe(pipe_class_memory);
10317 %}
10318 
10319 instruct string_indexof_conLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10320                               immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10321                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10322 %{
10323   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
10324   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10325   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10326          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10327 
10328   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}
10329   ins_encode %{
10330     int icnt2 = (int)$int_cnt2$$constant;
10331     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10332                                  $cnt1$$Register, zr,
10333                                  $tmp1$$Register, $tmp2$$Register,
10334                                  $tmp3$$Register, $tmp4$$Register,
10335                                  icnt2, $result$$Register, StrIntrinsicNode::LL);
10336   %}
10337   ins_pipe(pipe_class_memory);
10338 %}
10339 
10340 instruct string_indexof_conUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10341                               immI_1 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10342                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10343 %{
10344   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
10345   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10346   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10347          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10348 
10349   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}
10350   ins_encode %{
10351     int icnt2 = (int)$int_cnt2$$constant;
10352     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10353                                  $cnt1$$Register, zr,
10354                                  $tmp1$$Register, $tmp2$$Register,
10355                                  $tmp3$$Register, $tmp4$$Register,
10356                                  icnt2, $result$$Register, StrIntrinsicNode::UL);
10357   %}
10358   ins_pipe(pipe_class_memory);
10359 %}
10360 
10361 instruct stringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
10362                               iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10363                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10364 %{
10365   match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
10366   predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
10367   effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
10368          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10369 
10370   format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
10371   ins_encode %{
10372     __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
10373                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, false /* isL */);
10375   %}
10376   ins_pipe(pipe_class_memory);
10377 %}
10378 
10380 instruct stringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
10381                               iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10382                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10383 %{
10384   match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
10385   predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
10386   effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
10387          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10388 
10389   format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
10390   ins_encode %{
10391     __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
10392                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
10393                            $tmp3$$Register, $tmp4$$Register, true /* isL */);
10394   %}
10395   ins_pipe(pipe_class_memory);
10396 %}
10397 
10398 // clearing of an array
10399 instruct clearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, iRegP_R30 tmp1,
10400                             iRegP_R31 tmp2, Universe dummy)
10401 %{
  // temp registers must match the ones used in StubGenerator::generate_zero_blocks()
10403   predicate(UseBlockZeroing || !UseRVV);
10404   match(Set dummy (ClearArray cnt base));
10405   effect(USE_KILL cnt, USE_KILL base, TEMP tmp1, TEMP tmp2);
10406 
10407   ins_cost(4 * DEFAULT_COST);
10408   format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %}
10409 
10410   ins_encode %{
10411     address tpc = __ zero_words($base$$Register, $cnt$$Register);
10412     if (tpc == nullptr) {
10413       ciEnv::current()->record_failure("CodeCache is full");
10414       return;
10415     }
10416   %}
10417 
10418   ins_pipe(pipe_class_memory);
10419 %}
10420 
10421 instruct clearArray_imm_reg(immL cnt, iRegP_R28 base, Universe dummy, rFlagsReg cr)
10422 %{
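  // Note: the matched count is in words (zero_words() takes a word count),
  // while BlockZeroingLowLimit is in bytes, hence the shift by LogBytesPerWord
  // in the predicate below.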
10423   predicate(!UseRVV && (uint64_t)n->in(2)->get_long()
10424             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
10425   match(Set dummy (ClearArray cnt base));
10426   effect(USE_KILL base, KILL cr);
10427 
10428   ins_cost(4 * DEFAULT_COST);
10429   format %{ "ClearArray $cnt, $base\t#@clearArray_imm_reg" %}
10430 
10431   ins_encode %{
10432     __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
10433   %}
10434 
10435   ins_pipe(pipe_class_memory);
10436 %}
10437 
10438 instruct string_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt,
10439                         iRegI_R10 result, rFlagsReg cr)
10440 %{
10441   predicate(!UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
10442   match(Set result (StrEquals (Binary str1 str2) cnt));
10443   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
10444 
10445   format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %}
10446   ins_encode %{
10447     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
10448     __ string_equals($str1$$Register, $str2$$Register,
10449                      $result$$Register, $cnt$$Register);
10450   %}
10451   ins_pipe(pipe_class_memory);
10452 %}
10453 
10454 instruct array_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
10455                        iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
10456 %{
10457   predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
10458   match(Set result (AryEq ary1 ary2));
10459   effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);
10460 
10461   format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsB // KILL all" %}
10462   ins_encode %{
10463     __ arrays_equals($ary1$$Register, $ary2$$Register,
10464                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10465                      $result$$Register, 1);
10466   %}
10467   ins_pipe(pipe_class_memory);
10468 %}
10469 
10470 instruct array_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
10471                        iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
10472 %{
10473   predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
10474   match(Set result (AryEq ary1 ary2));
10475   effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);
10476 
10477   format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsC // KILL all" %}
10478   ins_encode %{
10479     __ arrays_equals($ary1$$Register, $ary2$$Register,
10480                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10481                      $result$$Register, 2);
10482   %}
10483   ins_pipe(pipe_class_memory);
10484 %}
10485 
10486 // fast ArraysSupport.vectorizedHashCode
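//
// For illustration, the intrinsic is intended to compute the usual Java
// polynomial hash over the cnt elements (of the width selected by
// $basic_type), i.e. starting from the incoming value of result:
//
//   result = result * 31^cnt + sum(i = 0 .. cnt-1) a[i] * 31^(cnt-1-i)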
10487 instruct arrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI basic_type,
10488                          iRegLNoSp tmp1, iRegLNoSp tmp2,
10489                          iRegLNoSp tmp3, iRegLNoSp tmp4,
10490                          iRegLNoSp tmp5, iRegLNoSp tmp6, rFlagsReg cr)
10491 %{
10492   match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
10493   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
10494          USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);
10495 
10496   format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
10497   ins_encode %{
10498     __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
10499                        $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10500                        $tmp4$$Register, $tmp5$$Register, $tmp6$$Register,
10501                        (BasicType)$basic_type$$constant);
10502   %}
10503   ins_pipe(pipe_class_memory);
10504 %}
10505 
10506 // ============================================================================
10507 // Safepoint Instructions
10508 
10509 instruct safePoint(iRegP poll)
10510 %{
10511   match(SafePoint poll);
10512 
10513   ins_cost(2 * LOAD_COST);
10514   format %{
10515     "lwu zr, [$poll]\t# Safepoint: poll for GC, #@safePoint"
10516   %}
10517   ins_encode %{
10518     __ read_polling_page(as_Register($poll$$reg), 0, relocInfo::poll_type);
10519   %}
10520   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
10521 %}
10522 
10523 // ============================================================================
10524 // This name is KNOWN by the ADLC and cannot be changed.
10525 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this node.
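// No code is needed: the current JavaThread is kept in a dedicated register
// (xthread) for the lifetime of the compiled frame, and the javaThread_RegP
// operand class pins $dst to that register.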
10527 instruct tlsLoadP(javaThread_RegP dst)
10528 %{
10529   match(Set dst (ThreadLocal));
10530 
10531   ins_cost(0);
10532 
10533   format %{ " -- \t// $dst=Thread::current(), empty, #@tlsLoadP" %}
10534 
10535   size(0);
10536 
10537   ins_encode( /*empty*/ );
10538 
10539   ins_pipe(pipe_class_empty);
10540 %}
10541 
10542 // inlined locking and unlocking
10543 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
10544 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
10545                      iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
10546 %{
10547   predicate(LockingMode != LM_LIGHTWEIGHT);
10548   match(Set cr (FastLock object box));
10549   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
10550 
10551   ins_cost(10 * DEFAULT_COST);
10552   format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}
10553 
10554   ins_encode %{
10555     __ fast_lock($object$$Register, $box$$Register,
10556                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
10557   %}
10558 
10559   ins_pipe(pipe_serial);
10560 %}
10561 
10562 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
10563 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2)
10564 %{
10565   predicate(LockingMode != LM_LIGHTWEIGHT);
10566   match(Set cr (FastUnlock object box));
10567   effect(TEMP tmp1, TEMP tmp2);
10568 
10569   ins_cost(10 * DEFAULT_COST);
10570   format %{ "fastunlock $object,$box\t! kills $tmp1, $tmp2, #@cmpFastUnlock" %}
10571 
10572   ins_encode %{
10573     __ fast_unlock($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
10574   %}
10575 
10576   ins_pipe(pipe_serial);
10577 %}
10578 
10579 instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
10580                                 iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
10581 %{
10582   predicate(LockingMode == LM_LIGHTWEIGHT);
10583   match(Set cr (FastLock object box));
10584   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
10585 
10586   ins_cost(10 * DEFAULT_COST);
10587   format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLockLightweight" %}
10588 
10589   ins_encode %{
10590     __ fast_lock_lightweight($object$$Register, $box$$Register,
10591                              $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
10592   %}
10593 
10594   ins_pipe(pipe_serial);
10595 %}
10596 
10597 instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box,
10598                                   iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
10599 %{
10600   predicate(LockingMode == LM_LIGHTWEIGHT);
10601   match(Set cr (FastUnlock object box));
10602   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
10603 
10604   ins_cost(10 * DEFAULT_COST);
10605   format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %}
10606 
10607   ins_encode %{
10608     __ fast_unlock_lightweight($object$$Register, $box$$Register,
10609                                $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
10610   %}
10611 
10612   ins_pipe(pipe_serial);
10613 %}
10614 
10615 // Tail Call; Jump from runtime stub to Java code.
10616 // Also known as an 'interprocedural jump'.
10617 // Target of jump will eventually return to caller.
10618 // TailJump below removes the return address.
10619 // Don't use fp for 'jump_target' because a MachEpilogNode has already been
10620 // emitted just above the TailCall which has reset fp to the caller state.
10621 instruct TailCalljmpInd(iRegPNoSpNoFp jump_target, inline_cache_RegP method_oop)
10622 %{
10623   match(TailCall jump_target method_oop);
10624 
10625   ins_cost(BRANCH_COST);
10626 
10627   format %{ "jalr $jump_target\t# $method_oop holds method oop, #@TailCalljmpInd." %}
10628 
10629   ins_encode(riscv_enc_tail_call(jump_target));
10630 
10631   ins_pipe(pipe_class_call);
10632 %}
10633 
10634 instruct TailjmpInd(iRegPNoSpNoFp jump_target, iRegP_R10 ex_oop)
10635 %{
10636   match(TailJump jump_target ex_oop);
10637 
10638   ins_cost(ALU_COST + BRANCH_COST);
10639 
10640   format %{ "jalr $jump_target\t# $ex_oop holds exception oop, #@TailjmpInd." %}
10641 
10642   ins_encode(riscv_enc_tail_jmp(jump_target));
10643 
10644   ins_pipe(pipe_class_call);
10645 %}
10646 
10647 // Forward exception.
10648 instruct ForwardExceptionjmp()
10649 %{
10650   match(ForwardException);
10651 
10652   ins_cost(BRANCH_COST);
10653 
10654   format %{ "j forward_exception_stub\t#@ForwardException" %}
10655 
10656   ins_encode %{
10657     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
10658   %}
10659 
10660   ins_pipe(pipe_class_call);
10661 %}
10662 
10663 // Create exception oop: created by stack-crawling runtime code.
// The created exception is now available to this handler and is set up
// just prior to jumping to this handler.  No code is emitted.
10666 instruct CreateException(iRegP_R10 ex_oop)
10667 %{
10668   match(Set ex_oop (CreateEx));
10669 
10670   ins_cost(0);
10671   format %{ " -- \t// exception oop; no code emitted, #@CreateException" %}
10672 
10673   size(0);
10674 
10675   ins_encode( /*empty*/ );
10676 
10677   ins_pipe(pipe_class_empty);
10678 %}
10679 
10680 // Rethrow exception: The exception oop will come in the first
10681 // argument position. Then JUMP (not call) to the rethrow stub code.
10682 instruct RethrowException()
10683 %{
10684   match(Rethrow);
10685 
10686   ins_cost(BRANCH_COST);
10687 
10688   format %{ "j rethrow_stub\t#@RethrowException" %}
10689 
10690   ins_encode(riscv_enc_rethrow());
10691 
10692   ins_pipe(pipe_class_call);
10693 %}
10694 
10695 // Return Instruction
10696 // epilog node loads ret address into ra as part of frame pop
10697 instruct Ret()
10698 %{
10699   match(Return);
10700 
10701   ins_cost(BRANCH_COST);
10702   format %{ "ret\t// return register, #@Ret" %}
10703 
10704   ins_encode(riscv_enc_ret());
10705 
10706   ins_pipe(pipe_branch);
10707 %}
10708 
10709 // Die now.
10710 instruct ShouldNotReachHere() %{
10711   match(Halt);
10712 
10713   ins_cost(BRANCH_COST);
10714 
10715   format %{ "#@ShouldNotReachHere" %}
10716 
10717   ins_encode %{
10718     if (is_reachable()) {
10719       __ stop(_halt_reason);
10720     }
10721   %}
10722 
10723   ins_pipe(pipe_class_default);
10724 %}
10725 
10726 
10727 //----------PEEPHOLE RULES-----------------------------------------------------
10728 // These must follow all instruction definitions as they use the names
10729 // defined in the instructions definitions.
10730 //
10731 // peepmatch ( root_instr_name [preceding_instruction]* );
10732 //
10733 // peepconstraint %{
10734 // (instruction_number.operand_name relational_op instruction_number.operand_name
10735 //  [, ...] );
10736 // // instruction numbers are zero-based using left to right order in peepmatch
10737 //
10738 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
10739 // // provide an instruction_number.operand_name for each operand that appears
10740 // // in the replacement instruction's match rule
10741 //
10742 // ---------VM FLAGS---------------------------------------------------------
10743 //
10744 // All peephole optimizations can be turned off using -XX:-OptoPeephole
10745 //
10746 // Each peephole rule is given an identifying number starting with zero and
10747 // increasing by one in the order seen by the parser.  An individual peephole
10748 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
10749 // on the command-line.
10750 //
10751 // ---------CURRENT LIMITATIONS----------------------------------------------
10752 //
10753 // Only match adjacent instructions in same basic block
10754 // Only equality constraints
10755 // Only constraints between operands, not (0.dest_reg == RAX_enc)
10756 // Only one replacement instruction
10757 //
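// ---------EXAMPLE-----------------------------------------------------------
//
// A rule following the grammar above would look roughly like the sketch
// below.  The instruction and operand names (addI_reg_imm, movI_reg, dst,
// src, imm) are purely illustrative and do not refer to real definitions in
// this file:
//
// peephole %{
//   peepmatch ( addI_reg_imm movI_reg );
//   peepconstraint ( 0.src == 1.dst );
//   peepreplace ( addI_reg_imm( 0.dst 1.src 0.imm ) );
// %}
//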
10758 //----------SMARTSPILL RULES---------------------------------------------------
10759 // These must follow all instruction definitions as they use the names
10760 // defined in the instructions definitions.
10761 
10762 // Local Variables:
10763 // mode: c++
10764 // End: