1 //
    2 // Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
    4 // Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
    5 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    6 //
    7 // This code is free software; you can redistribute it and/or modify it
    8 // under the terms of the GNU General Public License version 2 only, as
    9 // published by the Free Software Foundation.
   10 //
   11 // This code is distributed in the hope that it will be useful, but WITHOUT
   12 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   14 // version 2 for more details (a copy is included in the LICENSE file that
   15 // accompanied this code).
   16 //
   17 // You should have received a copy of the GNU General Public License version
   18 // 2 along with this work; if not, write to the Free Software Foundation,
   19 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   20 //
   21 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   22 // or visit www.oracle.com if you need additional information or have any
   23 // questions.
   24 //
   25 //
   26 
   27 // RISCV Architecture Description File
   28 
   29 //----------REGISTER DEFINITION BLOCK------------------------------------------
   30 // This information is used by the matcher and the register allocator to
   31 // describe individual registers and classes of registers within the target
   32 // architecture.
   33 
   34 register %{
   35 //----------Architecture Description Register Definitions----------------------
   36 // General Registers
   37 // "reg_def"  name ( register save type, C convention save type,
   38 //                   ideal register type, encoding );
   39 // Register Save Types:
   40 //
   41 // NS  = No-Save:       The register allocator assumes that these registers
   42 //                      can be used without saving upon entry to the method, &
   43 //                      that they do not need to be saved at call sites.
   44 //
   45 // SOC = Save-On-Call:  The register allocator assumes that these registers
   46 //                      can be used without saving upon entry to the method,
   47 //                      but that they must be saved at call sites.
   48 //
   49 // SOE = Save-On-Entry: The register allocator assumes that these registers
   50 //                      must be saved before using them upon entry to the
   51 //                      method, but they do not need to be saved at call
   52 //                      sites.
   53 //
   54 // AS  = Always-Save:   The register allocator assumes that these registers
   55 //                      must be saved before using them upon entry to the
   56 //                      method, & that they must be saved at call sites.
   57 //
   58 // Ideal Register Type is used to determine how to save & restore a
   59 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   60 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   61 //
   62 // The encoding number is the actual bit-pattern placed into the opcodes.
   63 
// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
   68 //
   69 // follow the C1 compiler in making registers
   70 //
   71 //   x7, x9-x17, x27-x31 volatile (caller save)
   72 //   x0-x4, x8, x23 system (no save, no allocate)
   73 //   x5-x6 non-allocatable (so we can use them as temporary regs)
   74 
   75 //
// As regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
   79 //
   80 
   81 // General Registers
   82 
   83 reg_def R0      ( NS,  NS,  Op_RegI, 0,  x0->as_VMReg()         ); // zr
   84 reg_def R0_H    ( NS,  NS,  Op_RegI, 0,  x0->as_VMReg()->next() );
   85 reg_def R1      ( NS,  SOC, Op_RegI, 1,  x1->as_VMReg()         ); // ra
   86 reg_def R1_H    ( NS,  SOC, Op_RegI, 1,  x1->as_VMReg()->next() );
   87 reg_def R2      ( NS,  NS,  Op_RegI, 2,  x2->as_VMReg()         ); // sp
   88 reg_def R2_H    ( NS,  NS,  Op_RegI, 2,  x2->as_VMReg()->next() );
   89 reg_def R3      ( NS,  NS,  Op_RegI, 3,  x3->as_VMReg()         ); // gp
   90 reg_def R3_H    ( NS,  NS,  Op_RegI, 3,  x3->as_VMReg()->next() );
   91 reg_def R4      ( NS,  NS,  Op_RegI, 4,  x4->as_VMReg()         ); // tp
   92 reg_def R4_H    ( NS,  NS,  Op_RegI, 4,  x4->as_VMReg()->next() );
   93 reg_def R7      ( SOC, SOC, Op_RegI, 7,  x7->as_VMReg()         );
   94 reg_def R7_H    ( SOC, SOC, Op_RegI, 7,  x7->as_VMReg()->next() );
   95 reg_def R8      ( NS,  SOE, Op_RegI, 8,  x8->as_VMReg()         ); // fp
   96 reg_def R8_H    ( NS,  SOE, Op_RegI, 8,  x8->as_VMReg()->next() );
   97 reg_def R9      ( SOC, SOE, Op_RegI, 9,  x9->as_VMReg()         );
   98 reg_def R9_H    ( SOC, SOE, Op_RegI, 9,  x9->as_VMReg()->next() );
   99 reg_def R10     ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()        );
  100 reg_def R10_H   ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()->next());
  101 reg_def R11     ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()        );
  102 reg_def R11_H   ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()->next());
  103 reg_def R12     ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()        );
  104 reg_def R12_H   ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()->next());
  105 reg_def R13     ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()        );
  106 reg_def R13_H   ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()->next());
  107 reg_def R14     ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()        );
  108 reg_def R14_H   ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()->next());
  109 reg_def R15     ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()        );
  110 reg_def R15_H   ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()->next());
  111 reg_def R16     ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()        );
  112 reg_def R16_H   ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()->next());
  113 reg_def R17     ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()        );
  114 reg_def R17_H   ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()->next());
  115 reg_def R18     ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()        );
  116 reg_def R18_H   ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()->next());
  117 reg_def R19     ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()        );
  118 reg_def R19_H   ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()->next());
  119 reg_def R20     ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()        ); // caller esp
  120 reg_def R20_H   ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()->next());
  121 reg_def R21     ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()        );
  122 reg_def R21_H   ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()->next());
  123 reg_def R22     ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()        );
  124 reg_def R22_H   ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()->next());
  125 reg_def R23     ( NS,  SOE, Op_RegI, 23, x23->as_VMReg()        ); // java thread
  126 reg_def R23_H   ( NS,  SOE, Op_RegI, 23, x23->as_VMReg()->next());
  127 reg_def R24     ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()        );
  128 reg_def R24_H   ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()->next());
  129 reg_def R25     ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()        );
  130 reg_def R25_H   ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()->next());
  131 reg_def R26     ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()        );
  132 reg_def R26_H   ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()->next());
  133 reg_def R27     ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()        ); // heapbase
  134 reg_def R27_H   ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()->next());
  135 reg_def R28     ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()        );
  136 reg_def R28_H   ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()->next());
  137 reg_def R29     ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()        );
  138 reg_def R29_H   ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()->next());
  139 reg_def R30     ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()        );
  140 reg_def R30_H   ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()->next());
  141 reg_def R31     ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()        );
  142 reg_def R31_H   ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()->next());
  143 
  144 // ----------------------------
  145 // Float/Double Registers
  146 // ----------------------------
  147 
  148 // Double Registers
  149 
  150 // The rules of ADL require that double registers be defined in pairs.
  151 // Each pair must be two 32-bit values, but not necessarily a pair of
  152 // single float registers. In each pair, ADLC-assigned register numbers
  153 // must be adjacent, with the lower number even. Finally, when the
  154 // CPU stores such a register pair to memory, the word associated with
  155 // the lower ADLC-assigned number must be stored to the lower address.
  156 
  157 // RISCV has 32 floating-point registers. Each can store a single
  158 // or double precision floating-point value.
  159 
// For Java use, float registers f0-f31 are always save-on-call, whereas
// the platform ABI treats f8-f9 and f18-f27 as callee save. Other
// float registers are SOC as per the platform spec.
  163 
  164 reg_def F0    ( SOC, SOC, Op_RegF,  0,  f0->as_VMReg()          );
  165 reg_def F0_H  ( SOC, SOC, Op_RegF,  0,  f0->as_VMReg()->next()  );
  166 reg_def F1    ( SOC, SOC, Op_RegF,  1,  f1->as_VMReg()          );
  167 reg_def F1_H  ( SOC, SOC, Op_RegF,  1,  f1->as_VMReg()->next()  );
  168 reg_def F2    ( SOC, SOC, Op_RegF,  2,  f2->as_VMReg()          );
  169 reg_def F2_H  ( SOC, SOC, Op_RegF,  2,  f2->as_VMReg()->next()  );
  170 reg_def F3    ( SOC, SOC, Op_RegF,  3,  f3->as_VMReg()          );
  171 reg_def F3_H  ( SOC, SOC, Op_RegF,  3,  f3->as_VMReg()->next()  );
  172 reg_def F4    ( SOC, SOC, Op_RegF,  4,  f4->as_VMReg()          );
  173 reg_def F4_H  ( SOC, SOC, Op_RegF,  4,  f4->as_VMReg()->next()  );
  174 reg_def F5    ( SOC, SOC, Op_RegF,  5,  f5->as_VMReg()          );
  175 reg_def F5_H  ( SOC, SOC, Op_RegF,  5,  f5->as_VMReg()->next()  );
  176 reg_def F6    ( SOC, SOC, Op_RegF,  6,  f6->as_VMReg()          );
  177 reg_def F6_H  ( SOC, SOC, Op_RegF,  6,  f6->as_VMReg()->next()  );
  178 reg_def F7    ( SOC, SOC, Op_RegF,  7,  f7->as_VMReg()          );
  179 reg_def F7_H  ( SOC, SOC, Op_RegF,  7,  f7->as_VMReg()->next()  );
  180 reg_def F8    ( SOC, SOE, Op_RegF,  8,  f8->as_VMReg()          );
  181 reg_def F8_H  ( SOC, SOE, Op_RegF,  8,  f8->as_VMReg()->next()  );
  182 reg_def F9    ( SOC, SOE, Op_RegF,  9,  f9->as_VMReg()          );
  183 reg_def F9_H  ( SOC, SOE, Op_RegF,  9,  f9->as_VMReg()->next()  );
  184 reg_def F10   ( SOC, SOC, Op_RegF,  10, f10->as_VMReg()         );
  185 reg_def F10_H ( SOC, SOC, Op_RegF,  10, f10->as_VMReg()->next() );
  186 reg_def F11   ( SOC, SOC, Op_RegF,  11, f11->as_VMReg()         );
  187 reg_def F11_H ( SOC, SOC, Op_RegF,  11, f11->as_VMReg()->next() );
  188 reg_def F12   ( SOC, SOC, Op_RegF,  12, f12->as_VMReg()         );
  189 reg_def F12_H ( SOC, SOC, Op_RegF,  12, f12->as_VMReg()->next() );
  190 reg_def F13   ( SOC, SOC, Op_RegF,  13, f13->as_VMReg()         );
  191 reg_def F13_H ( SOC, SOC, Op_RegF,  13, f13->as_VMReg()->next() );
  192 reg_def F14   ( SOC, SOC, Op_RegF,  14, f14->as_VMReg()         );
  193 reg_def F14_H ( SOC, SOC, Op_RegF,  14, f14->as_VMReg()->next() );
  194 reg_def F15   ( SOC, SOC, Op_RegF,  15, f15->as_VMReg()         );
  195 reg_def F15_H ( SOC, SOC, Op_RegF,  15, f15->as_VMReg()->next() );
  196 reg_def F16   ( SOC, SOC, Op_RegF,  16, f16->as_VMReg()         );
  197 reg_def F16_H ( SOC, SOC, Op_RegF,  16, f16->as_VMReg()->next() );
  198 reg_def F17   ( SOC, SOC, Op_RegF,  17, f17->as_VMReg()         );
  199 reg_def F17_H ( SOC, SOC, Op_RegF,  17, f17->as_VMReg()->next() );
  200 reg_def F18   ( SOC, SOE, Op_RegF,  18, f18->as_VMReg()         );
  201 reg_def F18_H ( SOC, SOE, Op_RegF,  18, f18->as_VMReg()->next() );
  202 reg_def F19   ( SOC, SOE, Op_RegF,  19, f19->as_VMReg()         );
  203 reg_def F19_H ( SOC, SOE, Op_RegF,  19, f19->as_VMReg()->next() );
  204 reg_def F20   ( SOC, SOE, Op_RegF,  20, f20->as_VMReg()         );
  205 reg_def F20_H ( SOC, SOE, Op_RegF,  20, f20->as_VMReg()->next() );
  206 reg_def F21   ( SOC, SOE, Op_RegF,  21, f21->as_VMReg()         );
  207 reg_def F21_H ( SOC, SOE, Op_RegF,  21, f21->as_VMReg()->next() );
  208 reg_def F22   ( SOC, SOE, Op_RegF,  22, f22->as_VMReg()         );
  209 reg_def F22_H ( SOC, SOE, Op_RegF,  22, f22->as_VMReg()->next() );
  210 reg_def F23   ( SOC, SOE, Op_RegF,  23, f23->as_VMReg()         );
  211 reg_def F23_H ( SOC, SOE, Op_RegF,  23, f23->as_VMReg()->next() );
  212 reg_def F24   ( SOC, SOE, Op_RegF,  24, f24->as_VMReg()         );
  213 reg_def F24_H ( SOC, SOE, Op_RegF,  24, f24->as_VMReg()->next() );
  214 reg_def F25   ( SOC, SOE, Op_RegF,  25, f25->as_VMReg()         );
  215 reg_def F25_H ( SOC, SOE, Op_RegF,  25, f25->as_VMReg()->next() );
  216 reg_def F26   ( SOC, SOE, Op_RegF,  26, f26->as_VMReg()         );
  217 reg_def F26_H ( SOC, SOE, Op_RegF,  26, f26->as_VMReg()->next() );
  218 reg_def F27   ( SOC, SOE, Op_RegF,  27, f27->as_VMReg()         );
  219 reg_def F27_H ( SOC, SOE, Op_RegF,  27, f27->as_VMReg()->next() );
  220 reg_def F28   ( SOC, SOC, Op_RegF,  28, f28->as_VMReg()         );
  221 reg_def F28_H ( SOC, SOC, Op_RegF,  28, f28->as_VMReg()->next() );
  222 reg_def F29   ( SOC, SOC, Op_RegF,  29, f29->as_VMReg()         );
  223 reg_def F29_H ( SOC, SOC, Op_RegF,  29, f29->as_VMReg()->next() );
  224 reg_def F30   ( SOC, SOC, Op_RegF,  30, f30->as_VMReg()         );
  225 reg_def F30_H ( SOC, SOC, Op_RegF,  30, f30->as_VMReg()->next() );
  226 reg_def F31   ( SOC, SOC, Op_RegF,  31, f31->as_VMReg()         );
  227 reg_def F31_H ( SOC, SOC, Op_RegF,  31, f31->as_VMReg()->next() );
  228 
  229 // ----------------------------
  230 // Vector Registers
  231 // ----------------------------
  232 
  233 // For RVV vector registers, we simply extend vector register size to 4
  234 // 'logical' slots. This is nominally 128 bits but it actually covers
  235 // all possible 'physical' RVV vector register lengths from 128 ~ 1024
  236 // bits. The 'physical' RVV vector register length is detected during
  237 // startup, so the register allocator is able to identify the correct
  238 // number of bytes needed for an RVV spill/unspill.
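//
// For example, on a machine whose physical VLEN is 256 bits, a single
// RVV register spill or unspill moves 32 bytes, even though the reg_defs
// below only model 4 logical 32-bit slots (128 bits) per vector register.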
  239 
  240 reg_def V0    ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()           );
  241 reg_def V0_H  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next()   );
  242 reg_def V0_J  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next(2)  );
  243 reg_def V0_K  ( SOC, SOC, Op_VecA, 0,  v0->as_VMReg()->next(3)  );
  244 
  245 reg_def V1    ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()           );
  246 reg_def V1_H  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next()   );
  247 reg_def V1_J  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next(2)  );
  248 reg_def V1_K  ( SOC, SOC, Op_VecA, 1,  v1->as_VMReg()->next(3)  );
  249 
  250 reg_def V2    ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()           );
  251 reg_def V2_H  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next()   );
  252 reg_def V2_J  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next(2)  );
  253 reg_def V2_K  ( SOC, SOC, Op_VecA, 2,  v2->as_VMReg()->next(3)  );
  254 
  255 reg_def V3    ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()           );
  256 reg_def V3_H  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next()   );
  257 reg_def V3_J  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next(2)  );
  258 reg_def V3_K  ( SOC, SOC, Op_VecA, 3,  v3->as_VMReg()->next(3)  );
  259 
  260 reg_def V4    ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()           );
  261 reg_def V4_H  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next()   );
  262 reg_def V4_J  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next(2)  );
  263 reg_def V4_K  ( SOC, SOC, Op_VecA, 4,  v4->as_VMReg()->next(3)  );
  264 
  265 reg_def V5    ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()           );
  266 reg_def V5_H  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next()   );
  267 reg_def V5_J  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next(2)  );
  268 reg_def V5_K  ( SOC, SOC, Op_VecA, 5,  v5->as_VMReg()->next(3)  );
  269 
  270 reg_def V6    ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()           );
  271 reg_def V6_H  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next()   );
  272 reg_def V6_J  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next(2)  );
  273 reg_def V6_K  ( SOC, SOC, Op_VecA, 6,  v6->as_VMReg()->next(3)  );
  274 
  275 reg_def V7    ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()           );
  276 reg_def V7_H  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next()   );
  277 reg_def V7_J  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next(2)  );
  278 reg_def V7_K  ( SOC, SOC, Op_VecA, 7,  v7->as_VMReg()->next(3)  );
  279 
  280 reg_def V8    ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()           );
  281 reg_def V8_H  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next()   );
  282 reg_def V8_J  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next(2)  );
  283 reg_def V8_K  ( SOC, SOC, Op_VecA, 8,  v8->as_VMReg()->next(3)  );
  284 
  285 reg_def V9    ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()           );
  286 reg_def V9_H  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next()   );
  287 reg_def V9_J  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next(2)  );
  288 reg_def V9_K  ( SOC, SOC, Op_VecA, 9,  v9->as_VMReg()->next(3)  );
  289 
  290 reg_def V10   ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()          );
  291 reg_def V10_H ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next()  );
  292 reg_def V10_J ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(2) );
  293 reg_def V10_K ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(3) );
  294 
  295 reg_def V11   ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()          );
  296 reg_def V11_H ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next()  );
  297 reg_def V11_J ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(2) );
  298 reg_def V11_K ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(3) );
  299 
  300 reg_def V12   ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()          );
  301 reg_def V12_H ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next()  );
  302 reg_def V12_J ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(2) );
  303 reg_def V12_K ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(3) );
  304 
  305 reg_def V13   ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()          );
  306 reg_def V13_H ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next()  );
  307 reg_def V13_J ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(2) );
  308 reg_def V13_K ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(3) );
  309 
  310 reg_def V14   ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()          );
  311 reg_def V14_H ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next()  );
  312 reg_def V14_J ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(2) );
  313 reg_def V14_K ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(3) );
  314 
  315 reg_def V15   ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()          );
  316 reg_def V15_H ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next()  );
  317 reg_def V15_J ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(2) );
  318 reg_def V15_K ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(3) );
  319 
  320 reg_def V16   ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()          );
  321 reg_def V16_H ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next()  );
  322 reg_def V16_J ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(2) );
  323 reg_def V16_K ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(3) );
  324 
  325 reg_def V17   ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()          );
  326 reg_def V17_H ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next()  );
  327 reg_def V17_J ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(2) );
  328 reg_def V17_K ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(3) );
  329 
  330 reg_def V18   ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()          );
  331 reg_def V18_H ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next()  );
  332 reg_def V18_J ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(2) );
  333 reg_def V18_K ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(3) );
  334 
  335 reg_def V19   ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()          );
  336 reg_def V19_H ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next()  );
  337 reg_def V19_J ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(2) );
  338 reg_def V19_K ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(3) );
  339 
  340 reg_def V20   ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()          );
  341 reg_def V20_H ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next()  );
  342 reg_def V20_J ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(2) );
  343 reg_def V20_K ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(3) );
  344 
  345 reg_def V21   ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()          );
  346 reg_def V21_H ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next()  );
  347 reg_def V21_J ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(2) );
  348 reg_def V21_K ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(3) );
  349 
  350 reg_def V22   ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()          );
  351 reg_def V22_H ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next()  );
  352 reg_def V22_J ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(2) );
  353 reg_def V22_K ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(3) );
  354 
  355 reg_def V23   ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()          );
  356 reg_def V23_H ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next()  );
  357 reg_def V23_J ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(2) );
  358 reg_def V23_K ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(3) );
  359 
  360 reg_def V24   ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()          );
  361 reg_def V24_H ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next()  );
  362 reg_def V24_J ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(2) );
  363 reg_def V24_K ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(3) );
  364 
  365 reg_def V25   ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()          );
  366 reg_def V25_H ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next()  );
  367 reg_def V25_J ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(2) );
  368 reg_def V25_K ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(3) );
  369 
  370 reg_def V26   ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()          );
  371 reg_def V26_H ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next()  );
  372 reg_def V26_J ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(2) );
  373 reg_def V26_K ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(3) );
  374 
  375 reg_def V27   ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()          );
  376 reg_def V27_H ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next()  );
  377 reg_def V27_J ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(2) );
  378 reg_def V27_K ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(3) );
  379 
  380 reg_def V28   ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()          );
  381 reg_def V28_H ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next()  );
  382 reg_def V28_J ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(2) );
  383 reg_def V28_K ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(3) );
  384 
  385 reg_def V29   ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()          );
  386 reg_def V29_H ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next()  );
  387 reg_def V29_J ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(2) );
  388 reg_def V29_K ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(3) );
  389 
  390 reg_def V30   ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()          );
  391 reg_def V30_H ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next()  );
  392 reg_def V30_J ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(2) );
  393 reg_def V30_K ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(3) );
  394 
  395 reg_def V31   ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()          );
  396 reg_def V31_H ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next()  );
  397 reg_def V31_J ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(2) );
  398 reg_def V31_K ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(3) );
  399 
  400 // ----------------------------
  401 // Special Registers
  402 // ----------------------------
  403 
// On riscv there is no physical flag register, so we use t1 (x6) instead
// to bridge the RegFlags semantics in share/opto.
  406 
  407 reg_def RFLAGS   (SOC, SOC, Op_RegFlags, 6, x6->as_VMReg()        );
  408 
  409 // Specify priority of register selection within phases of register
  410 // allocation.  Highest priority is first.  A useful heuristic is to
  411 // give registers a low priority when they are required by machine
  412 // instructions, like EAX and EDX on I486, and choose no-save registers
  413 // before save-on-call, & save-on-call before save-on-entry.  Registers
  414 // which participate in fixed calling sequences should come last.
  415 // Registers which are used as pairs must fall on an even boundary.
  416 
  417 alloc_class chunk0(
  418     // volatiles
  419     R7,  R7_H,
  420     R28, R28_H,
  421     R29, R29_H,
  422     R30, R30_H,
  423     R31, R31_H,
  424 
  425     // arg registers
  426     R10, R10_H,
  427     R11, R11_H,
  428     R12, R12_H,
  429     R13, R13_H,
  430     R14, R14_H,
  431     R15, R15_H,
  432     R16, R16_H,
  433     R17, R17_H,
  434 
  435     // non-volatiles
  436     R9,  R9_H,
  437     R18, R18_H,
  438     R19, R19_H,
  439     R20, R20_H,
  440     R21, R21_H,
  441     R22, R22_H,
  442     R24, R24_H,
  443     R25, R25_H,
  444     R26, R26_H,
  445 
  446     // non-allocatable registers
  447     R23, R23_H, // java thread
  448     R27, R27_H, // heapbase
  449     R4,  R4_H,  // thread
  450     R8,  R8_H,  // fp
  451     R0,  R0_H,  // zero
  452     R1,  R1_H,  // ra
  453     R2,  R2_H,  // sp
  454     R3,  R3_H,  // gp
  455 );
  456 
  457 alloc_class chunk1(
  458 
  459     // no save
  460     F0,  F0_H,
  461     F1,  F1_H,
  462     F2,  F2_H,
  463     F3,  F3_H,
  464     F4,  F4_H,
  465     F5,  F5_H,
  466     F6,  F6_H,
  467     F7,  F7_H,
  468     F28, F28_H,
  469     F29, F29_H,
  470     F30, F30_H,
  471     F31, F31_H,
  472 
  473     // arg registers
  474     F10, F10_H,
  475     F11, F11_H,
  476     F12, F12_H,
  477     F13, F13_H,
  478     F14, F14_H,
  479     F15, F15_H,
  480     F16, F16_H,
  481     F17, F17_H,
  482 
  483     // non-volatiles
  484     F8,  F8_H,
  485     F9,  F9_H,
  486     F18, F18_H,
  487     F19, F19_H,
  488     F20, F20_H,
  489     F21, F21_H,
  490     F22, F22_H,
  491     F23, F23_H,
  492     F24, F24_H,
  493     F25, F25_H,
  494     F26, F26_H,
  495     F27, F27_H,
  496 );
  497 
  498 alloc_class chunk2(
  499     V0, V0_H, V0_J, V0_K,
  500     V1, V1_H, V1_J, V1_K,
  501     V2, V2_H, V2_J, V2_K,
  502     V3, V3_H, V3_J, V3_K,
  503     V4, V4_H, V4_J, V4_K,
  504     V5, V5_H, V5_J, V5_K,
  505     V6, V6_H, V6_J, V6_K,
  506     V7, V7_H, V7_J, V7_K,
  507     V8, V8_H, V8_J, V8_K,
  508     V9, V9_H, V9_J, V9_K,
  509     V10, V10_H, V10_J, V10_K,
  510     V11, V11_H, V11_J, V11_K,
  511     V12, V12_H, V12_J, V12_K,
  512     V13, V13_H, V13_J, V13_K,
  513     V14, V14_H, V14_J, V14_K,
  514     V15, V15_H, V15_J, V15_K,
  515     V16, V16_H, V16_J, V16_K,
  516     V17, V17_H, V17_J, V17_K,
  517     V18, V18_H, V18_J, V18_K,
  518     V19, V19_H, V19_J, V19_K,
  519     V20, V20_H, V20_J, V20_K,
  520     V21, V21_H, V21_J, V21_K,
  521     V22, V22_H, V22_J, V22_K,
  522     V23, V23_H, V23_J, V23_K,
  523     V24, V24_H, V24_J, V24_K,
  524     V25, V25_H, V25_J, V25_K,
  525     V26, V26_H, V26_J, V26_K,
  526     V27, V27_H, V27_J, V27_K,
  527     V28, V28_H, V28_J, V28_K,
  528     V29, V29_H, V29_J, V29_K,
  529     V30, V30_H, V30_J, V30_K,
  530     V31, V31_H, V31_J, V31_K,
  531 );
  532 
  533 alloc_class chunk3(RFLAGS);
  534 
  535 //----------Architecture Description Register Classes--------------------------
  536 // Several register classes are automatically defined based upon information in
  537 // this architecture description.
  538 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  539 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  540 //
  541 
  542 // Class for all 32 bit general purpose registers
  543 reg_class all_reg32(
  544     R0,
  545     R1,
  546     R2,
  547     R3,
  548     R4,
  549     R7,
  550     R8,
  551     R9,
  552     R10,
  553     R11,
  554     R12,
  555     R13,
  556     R14,
  557     R15,
  558     R16,
  559     R17,
  560     R18,
  561     R19,
  562     R20,
  563     R21,
  564     R22,
  565     R23,
  566     R24,
  567     R25,
  568     R26,
  569     R27,
  570     R28,
  571     R29,
  572     R30,
  573     R31
  574 );
  575 
  576 // Class for any 32 bit integer registers (excluding zr)
  577 reg_class any_reg32 %{
  578   return _ANY_REG32_mask;
  579 %}
  580 
  581 // Singleton class for R10 int register
  582 reg_class int_r10_reg(R10);
  583 
  584 // Singleton class for R12 int register
  585 reg_class int_r12_reg(R12);
  586 
  587 // Singleton class for R13 int register
  588 reg_class int_r13_reg(R13);
  589 
  590 // Singleton class for R14 int register
  591 reg_class int_r14_reg(R14);
  592 
  593 // Class for all long integer registers
  594 reg_class all_reg(
  595     R0,  R0_H,
  596     R1,  R1_H,
  597     R2,  R2_H,
  598     R3,  R3_H,
  599     R4,  R4_H,
  600     R7,  R7_H,
  601     R8,  R8_H,
  602     R9,  R9_H,
  603     R10, R10_H,
  604     R11, R11_H,
  605     R12, R12_H,
  606     R13, R13_H,
  607     R14, R14_H,
  608     R15, R15_H,
  609     R16, R16_H,
  610     R17, R17_H,
  611     R18, R18_H,
  612     R19, R19_H,
  613     R20, R20_H,
  614     R21, R21_H,
  615     R22, R22_H,
  616     R23, R23_H,
  617     R24, R24_H,
  618     R25, R25_H,
  619     R26, R26_H,
  620     R27, R27_H,
  621     R28, R28_H,
  622     R29, R29_H,
  623     R30, R30_H,
  624     R31, R31_H
  625 );
  626 
// Class for any long integer registers (excluding zr)
  628 reg_class any_reg %{
  629   return _ANY_REG_mask;
  630 %}
  631 
  632 // Class for non-allocatable 32 bit registers
  633 reg_class non_allocatable_reg32(
  634     R0,                       // zr
  635     R1,                       // ra
  636     R2,                       // sp
  637     R3,                       // gp
  638     R4,                       // tp
  639     R23                       // java thread
  640 );
  641 
  642 // Class for non-allocatable 64 bit registers
  643 reg_class non_allocatable_reg(
  644     R0,  R0_H,                // zr
  645     R1,  R1_H,                // ra
  646     R2,  R2_H,                // sp
  647     R3,  R3_H,                // gp
  648     R4,  R4_H,                // tp
  649     R23, R23_H                // java thread
  650 );
  651 
  652 // Class for all non-special integer registers
  653 reg_class no_special_reg32 %{
  654   return _NO_SPECIAL_REG32_mask;
  655 %}
  656 
  657 // Class for all non-special long integer registers
  658 reg_class no_special_reg %{
  659   return _NO_SPECIAL_REG_mask;
  660 %}
  661 
  662 reg_class ptr_reg %{
  663   return _PTR_REG_mask;
  664 %}
  665 
  666 // Class for all non_special pointer registers
  667 reg_class no_special_ptr_reg %{
  668   return _NO_SPECIAL_PTR_REG_mask;
  669 %}
  670 
  671 // Class for all non_special pointer registers (excluding fp)
  672 reg_class no_special_no_fp_ptr_reg %{
  673   return _NO_SPECIAL_NO_FP_PTR_REG_mask;
  674 %}
  675 
  676 // Class for 64 bit register r10
  677 reg_class r10_reg(
  678     R10, R10_H
  679 );
  680 
  681 // Class for 64 bit register r11
  682 reg_class r11_reg(
  683     R11, R11_H
  684 );
  685 
  686 // Class for 64 bit register r12
  687 reg_class r12_reg(
  688     R12, R12_H
  689 );
  690 
  691 // Class for 64 bit register r13
  692 reg_class r13_reg(
  693     R13, R13_H
  694 );
  695 
  696 // Class for 64 bit register r14
  697 reg_class r14_reg(
  698     R14, R14_H
  699 );
  700 
  701 // Class for 64 bit register r15
  702 reg_class r15_reg(
  703     R15, R15_H
  704 );
  705 
  706 // Class for 64 bit register r16
  707 reg_class r16_reg(
  708     R16, R16_H
  709 );
  710 
  711 // Class for method register
  712 reg_class method_reg(
  713     R31, R31_H
  714 );
  715 
  716 // Class for java thread register
  717 reg_class java_thread_reg(
  718     R23, R23_H
  719 );
  720 
  721 reg_class r28_reg(
  722     R28, R28_H
  723 );
  724 
  725 reg_class r29_reg(
  726     R29, R29_H
  727 );
  728 
  729 reg_class r30_reg(
  730     R30, R30_H
  731 );
  732 
  733 reg_class r31_reg(
  734     R31, R31_H
  735 );
  736 
// Class for zero register
  738 reg_class zr_reg(
  739     R0, R0_H
  740 );
  741 
  742 // Class for thread register
  743 reg_class thread_reg(
  744     R4, R4_H
  745 );
  746 
  747 // Class for frame pointer register
  748 reg_class fp_reg(
  749     R8, R8_H
  750 );
  751 
  752 // Class for link register
  753 reg_class ra_reg(
  754     R1, R1_H
  755 );
  756 
  757 // Class for long sp register
  758 reg_class sp_reg(
  759     R2, R2_H
  760 );
  761 
  762 // Class for all float registers
  763 reg_class float_reg(
  764     F0,
  765     F1,
  766     F2,
  767     F3,
  768     F4,
  769     F5,
  770     F6,
  771     F7,
  772     F8,
  773     F9,
  774     F10,
  775     F11,
  776     F12,
  777     F13,
  778     F14,
  779     F15,
  780     F16,
  781     F17,
  782     F18,
  783     F19,
  784     F20,
  785     F21,
  786     F22,
  787     F23,
  788     F24,
  789     F25,
  790     F26,
  791     F27,
  792     F28,
  793     F29,
  794     F30,
  795     F31
  796 );
  797 
  798 // Double precision float registers have virtual `high halves' that
  799 // are needed by the allocator.
  800 // Class for all double registers
  801 reg_class double_reg(
  802     F0,  F0_H,
  803     F1,  F1_H,
  804     F2,  F2_H,
  805     F3,  F3_H,
  806     F4,  F4_H,
  807     F5,  F5_H,
  808     F6,  F6_H,
  809     F7,  F7_H,
  810     F8,  F8_H,
  811     F9,  F9_H,
  812     F10, F10_H,
  813     F11, F11_H,
  814     F12, F12_H,
  815     F13, F13_H,
  816     F14, F14_H,
  817     F15, F15_H,
  818     F16, F16_H,
  819     F17, F17_H,
  820     F18, F18_H,
  821     F19, F19_H,
  822     F20, F20_H,
  823     F21, F21_H,
  824     F22, F22_H,
  825     F23, F23_H,
  826     F24, F24_H,
  827     F25, F25_H,
  828     F26, F26_H,
  829     F27, F27_H,
  830     F28, F28_H,
  831     F29, F29_H,
  832     F30, F30_H,
  833     F31, F31_H
  834 );
  835 
  836 // Class for RVV vector registers
  837 // Note: v0, v30 and v31 are used as mask registers.
  838 reg_class vectora_reg(
  839     V1, V1_H, V1_J, V1_K,
  840     V2, V2_H, V2_J, V2_K,
  841     V3, V3_H, V3_J, V3_K,
  842     V4, V4_H, V4_J, V4_K,
  843     V5, V5_H, V5_J, V5_K,
  844     V6, V6_H, V6_J, V6_K,
  845     V7, V7_H, V7_J, V7_K,
  846     V8, V8_H, V8_J, V8_K,
  847     V9, V9_H, V9_J, V9_K,
  848     V10, V10_H, V10_J, V10_K,
  849     V11, V11_H, V11_J, V11_K,
  850     V12, V12_H, V12_J, V12_K,
  851     V13, V13_H, V13_J, V13_K,
  852     V14, V14_H, V14_J, V14_K,
  853     V15, V15_H, V15_J, V15_K,
  854     V16, V16_H, V16_J, V16_K,
  855     V17, V17_H, V17_J, V17_K,
  856     V18, V18_H, V18_J, V18_K,
  857     V19, V19_H, V19_J, V19_K,
  858     V20, V20_H, V20_J, V20_K,
  859     V21, V21_H, V21_J, V21_K,
  860     V22, V22_H, V22_J, V22_K,
  861     V23, V23_H, V23_J, V23_K,
  862     V24, V24_H, V24_J, V24_K,
  863     V25, V25_H, V25_J, V25_K,
  864     V26, V26_H, V26_J, V26_K,
  865     V27, V27_H, V27_J, V27_K,
  866     V28, V28_H, V28_J, V28_K,
  867     V29, V29_H, V29_J, V29_K
  868 );
  869 
  870 // Class for 64 bit register f0
  871 reg_class f0_reg(
  872     F0, F0_H
  873 );
  874 
  875 // Class for 64 bit register f1
  876 reg_class f1_reg(
  877     F1, F1_H
  878 );
  879 
  880 // Class for 64 bit register f2
  881 reg_class f2_reg(
  882     F2, F2_H
  883 );
  884 
  885 // Class for 64 bit register f3
  886 reg_class f3_reg(
  887     F3, F3_H
  888 );
  889 
  890 // class for vector register v1
  891 reg_class v1_reg(
  892     V1, V1_H, V1_J, V1_K
  893 );
  894 
  895 // class for vector register v2
  896 reg_class v2_reg(
  897     V2, V2_H, V2_J, V2_K
  898 );
  899 
  900 // class for vector register v3
  901 reg_class v3_reg(
  902     V3, V3_H, V3_J, V3_K
  903 );
  904 
  905 // class for vector register v4
  906 reg_class v4_reg(
  907     V4, V4_H, V4_J, V4_K
  908 );
  909 
  910 // class for vector register v5
  911 reg_class v5_reg(
  912     V5, V5_H, V5_J, V5_K
  913 );
  914 
  915 // class for vector register v6
  916 reg_class v6_reg(
  917     V6, V6_H, V6_J, V6_K
  918 );
  919 
  920 // class for vector register v7
  921 reg_class v7_reg(
  922     V7, V7_H, V7_J, V7_K
  923 );
  924 
  925 // class for vector register v8
  926 reg_class v8_reg(
  927     V8, V8_H, V8_J, V8_K
  928 );
  929 
  930 // class for vector register v9
  931 reg_class v9_reg(
  932     V9, V9_H, V9_J, V9_K
  933 );
  934 
  935 // class for vector register v10
  936 reg_class v10_reg(
  937     V10, V10_H, V10_J, V10_K
  938 );
  939 
  940 // class for vector register v11
  941 reg_class v11_reg(
  942     V11, V11_H, V11_J, V11_K
  943 );
  944 
  945 // class for condition codes
  946 reg_class reg_flags(RFLAGS);
  947 
  948 // Class for RVV v0 mask register
  949 // https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#53-vector-masking
  950 // The mask value used to control execution of a masked vector
  951 // instruction is always supplied by vector register v0.
  952 reg_class vmask_reg_v0 (
  953     V0
  954 );
  955 
  956 // Class for RVV mask registers
  957 // We need two more vmask registers to do the vector mask logical ops,
// so define v30 and v31 as mask registers too.
  959 reg_class vmask_reg (
  960     V0,
  961     V30,
  962     V31
  963 );
  964 %}
  965 
  966 //----------DEFINITION BLOCK---------------------------------------------------
  967 // Define name --> value mappings to inform the ADLC of an integer valued name
  968 // Current support includes integer values in the range [0, 0x7FFFFFFF]
  969 // Format:
  970 //        int_def  <name>         ( <int_value>, <expression>);
  971 // Generated Code in ad_<arch>.hpp
  972 //        #define  <name>   (<expression>)
  973 //        // value == <int_value>
  974 // Generated code in ad_<arch>.cpp adlc_verification()
  975 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
  976 //
  977 
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. A huge cost appears to be a way of saying don't do
// something. An illustrative sketch of how these cost symbols are used
// appears after the definitions block below.
  983 
  984 definitions %{
  985   // The default cost (of a register move instruction).
  986   int_def DEFAULT_COST         (  100,               100);
  987   int_def ALU_COST             (  100,  1 * DEFAULT_COST);          // unknown, const, arith, shift, slt,
  988                                                                     // multi, auipc, nop, logical, move
  989   int_def LOAD_COST            (  300,  3 * DEFAULT_COST);          // load, fpload
  990   int_def STORE_COST           (  100,  1 * DEFAULT_COST);          // store, fpstore
  991   int_def XFER_COST            (  300,  3 * DEFAULT_COST);          // mfc, mtc, fcvt, fmove, fcmp
  992   int_def FMVX_COST            (  100,  1 * DEFAULT_COST);          // shuffles with no conversion
  993   int_def BRANCH_COST          (  200,  2 * DEFAULT_COST);          // branch, jmp, call
  994   int_def IMUL_COST            ( 1000, 10 * DEFAULT_COST);          // imul
  995   int_def IDIVSI_COST          ( 3400, 34 * DEFAULT_COST);          // idivsi
  996   int_def IDIVDI_COST          ( 6600, 66 * DEFAULT_COST);          // idivdi
  997   int_def FMUL_SINGLE_COST     (  500,  5 * DEFAULT_COST);          // fmul, fmadd
  998   int_def FMUL_DOUBLE_COST     (  700,  7 * DEFAULT_COST);          // fmul, fmadd
  999   int_def FDIV_COST            ( 2000, 20 * DEFAULT_COST);          // fdiv
 1000   int_def FSQRT_COST           ( 2500, 25 * DEFAULT_COST);          // fsqrt
 1001   int_def VOLATILE_REF_COST    ( 1000, 10 * DEFAULT_COST);
  int_def CACHE_MISS_COST      ( 2000, 20 * DEFAULT_COST);          // typical cache miss penalty
 1003 %}
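
// The cost symbols defined above are consumed by the ins_cost() attribute
// of the instruct rules that appear later in this file. The commented
// sketch below is only illustrative (the rule name is hypothetical, and
// the operand and pipeline class names follow the conventions used by
// the real rules further down); it is not an additional definition:
//
//   instruct example_addI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
//     match(Set dst (AddI src1 src2));
//     ins_cost(ALU_COST);          // register-register ALU op: 1 * DEFAULT_COST
//     format %{ "addw  $dst, $src1, $src2" %}
//     ins_encode %{
//       __ addw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
//     %}
//     ins_pipe(ialu_reg_reg);
//   %}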
 1004 
 1005 
 1006 
 1007 //----------SOURCE BLOCK-------------------------------------------------------
 1008 // This is a block of C++ code which provides values, functions, and
 1009 // definitions necessary in the rest of the architecture description
 1010 
 1011 source_hpp %{
 1012 
 1013 #include "asm/macroAssembler.hpp"
 1014 #include "gc/shared/barrierSetAssembler.hpp"
 1015 #include "gc/shared/cardTable.hpp"
 1016 #include "gc/shared/cardTableBarrierSet.hpp"
 1017 #include "gc/shared/collectedHeap.hpp"
 1018 #include "opto/addnode.hpp"
 1019 #include "opto/convertnode.hpp"
 1020 #include "runtime/objectMonitor.hpp"
 1021 
 1022 extern RegMask _ANY_REG32_mask;
 1023 extern RegMask _ANY_REG_mask;
 1024 extern RegMask _PTR_REG_mask;
 1025 extern RegMask _NO_SPECIAL_REG32_mask;
 1026 extern RegMask _NO_SPECIAL_REG_mask;
 1027 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1028 extern RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
 1029 
 1030 class CallStubImpl {
 1031 
 1032   //--------------------------------------------------------------
 1033   //---<  Used for optimization in Compile::shorten_branches  >---
 1034   //--------------------------------------------------------------
 1035 
 1036  public:
 1037   // Size of call trampoline stub.
 1038   static uint size_call_trampoline() {
 1039     return 0; // no call trampolines on this platform
 1040   }
 1041 
 1042   // number of relocations needed by a call trampoline stub
 1043   static uint reloc_call_trampoline() {
 1044     return 0; // no call trampolines on this platform
 1045   }
 1046 };
 1047 
 1048 class HandlerImpl {
 1049 
 1050  public:
 1051 
 1052   static int emit_exception_handler(C2_MacroAssembler *masm);
 1053   static int emit_deopt_handler(C2_MacroAssembler* masm);
 1054 
 1055   static uint size_exception_handler() {
 1056     return MacroAssembler::far_branch_size();
 1057   }
 1058 
 1059   static uint size_deopt_handler() {
 1060     // count auipc + far branch
 1061     return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
 1062   }
 1063 };
 1064 
 1065 class Node::PD {
 1066 public:
 1067   enum NodeFlags {
 1068     _last_flag = Node::_last_flag
 1069   };
 1070 };
 1071 
 1072 bool is_CAS(int opcode, bool maybe_volatile);
 1073 
 1074 // predicate controlling translation of CompareAndSwapX
 1075 bool needs_acquiring_load_reserved(const Node *load);
 1076 
 1077 // predicate controlling addressing modes
 1078 bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 1079 %}
 1080 
 1081 source %{
 1082 
 1083 // Derived RegMask with conditionally allocatable registers
 1084 
 1085 RegMask _ANY_REG32_mask;
 1086 RegMask _ANY_REG_mask;
 1087 RegMask _PTR_REG_mask;
 1088 RegMask _NO_SPECIAL_REG32_mask;
 1089 RegMask _NO_SPECIAL_REG_mask;
 1090 RegMask _NO_SPECIAL_PTR_REG_mask;
 1091 RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
 1092 
 1093 void reg_mask_init() {
 1094 
 1095   _ANY_REG32_mask = _ALL_REG32_mask;
 1096   _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(x0->as_VMReg()));
 1097 
 1098   _ANY_REG_mask = _ALL_REG_mask;
 1099   _ANY_REG_mask.SUBTRACT(_ZR_REG_mask);
 1100 
 1101   _PTR_REG_mask = _ALL_REG_mask;
 1102   _PTR_REG_mask.SUBTRACT(_ZR_REG_mask);
 1103 
 1104   _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
 1105   _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
 1106 
 1107   _NO_SPECIAL_REG_mask = _ALL_REG_mask;
 1108   _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
 1109 
 1110   _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
 1111   _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
 1112 
 1113   // x27 is not allocatable when compressed oops is on
 1114   if (UseCompressedOops) {
 1115     _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
 1116     _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
 1117     _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
 1118   }
 1119 
 1120   // x8 is not allocatable when PreserveFramePointer is on
 1121   if (PreserveFramePointer) {
 1122     _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
 1123     _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
 1124     _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
 1125   }
 1126 
 1127   _NO_SPECIAL_NO_FP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
 1128   _NO_SPECIAL_NO_FP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
 1129 }
 1130 
 1131 void PhaseOutput::pd_perform_mach_node_analysis() {
 1132 }
 1133 
 1134 int MachNode::pd_alignment_required() const {
 1135   return 1;
 1136 }
 1137 
 1138 int MachNode::compute_padding(int current_offset) const {
 1139   return 0;
 1140 }
 1141 
 1142 // is_CAS(int opcode, bool maybe_volatile)
 1143 //
// return true if opcode is one of the possible CompareAndSwapX
// values, otherwise false.
 1146 bool is_CAS(int opcode, bool maybe_volatile)
 1147 {
 1148   switch (opcode) {
 1149     // We handle these
 1150     case Op_CompareAndSwapI:
 1151     case Op_CompareAndSwapL:
 1152     case Op_CompareAndSwapP:
 1153     case Op_CompareAndSwapN:
 1154     case Op_ShenandoahCompareAndSwapP:
 1155     case Op_ShenandoahCompareAndSwapN:
 1156     case Op_CompareAndSwapB:
 1157     case Op_CompareAndSwapS:
 1158     case Op_GetAndSetI:
 1159     case Op_GetAndSetL:
 1160     case Op_GetAndSetP:
 1161     case Op_GetAndSetN:
 1162     case Op_GetAndAddI:
 1163     case Op_GetAndAddL:
 1164       return true;
 1165     case Op_CompareAndExchangeI:
 1166     case Op_CompareAndExchangeN:
 1167     case Op_CompareAndExchangeB:
 1168     case Op_CompareAndExchangeS:
 1169     case Op_CompareAndExchangeL:
 1170     case Op_CompareAndExchangeP:
 1171     case Op_WeakCompareAndSwapB:
 1172     case Op_WeakCompareAndSwapS:
 1173     case Op_WeakCompareAndSwapI:
 1174     case Op_WeakCompareAndSwapL:
 1175     case Op_WeakCompareAndSwapP:
 1176     case Op_WeakCompareAndSwapN:
 1177     case Op_ShenandoahWeakCompareAndSwapP:
 1178     case Op_ShenandoahWeakCompareAndSwapN:
 1179     case Op_ShenandoahCompareAndExchangeP:
 1180     case Op_ShenandoahCompareAndExchangeN:
 1181       return maybe_volatile;
 1182     default:
 1183       return false;
 1184   }
 1185 }
 1186 
 1187 // predicate controlling translation of CAS
 1188 //
// returns true if CAS needs to use an acquiring load, otherwise false
 1190 bool needs_acquiring_load_reserved(const Node *n)
 1191 {
 1192   assert(n != nullptr && is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1193 
 1194   LoadStoreNode* ldst = n->as_LoadStore();
 1195   if (n != nullptr && is_CAS(n->Opcode(), false)) {
 1196     assert(ldst != nullptr && ldst->trailing_membar() != nullptr, "expected trailing membar");
 1197   } else {
 1198     return ldst != nullptr && ldst->trailing_membar() != nullptr;
 1199   }
  // the assert above guarantees a trailing membar, so we can just return true here
 1201   return true;
 1202 }
 1203 #define __ masm->
 1204 
// forward declarations for helper functions to convert register
// indices to register objects
 1207 
 1208 // the ad file has to provide implementations of certain methods
 1209 // expected by the generic code
 1210 //
 1211 // REQUIRED FUNCTIONALITY
 1212 
 1213 //=============================================================================
 1214 
 1215 // !!!!! Special hack to get all types of calls to specify the byte offset
 1216 //       from the start of the call to the point where the return address
 1217 //       will point.
 1218 
 1219 int MachCallStaticJavaNode::ret_addr_offset()
 1220 {
 1221   return 3 * NativeInstruction::instruction_size; // auipc + ld + jalr
 1222 }
 1223 
 1224 int MachCallDynamicJavaNode::ret_addr_offset()
 1225 {
 1226   return NativeMovConstReg::movptr2_instruction_size + (3 * NativeInstruction::instruction_size); // movptr2, auipc + ld + jal
 1227 }
 1228 
 1229 int MachCallRuntimeNode::ret_addr_offset() {
 1230   // For address inside the code cache the call will be:
 1231   //   auipc + jalr
 1232   // For real runtime callouts it will be 8 instructions
 1233   // see riscv_enc_java_to_runtime
 1234   //   la(t0, retaddr)                                             ->  auipc + addi
 1235   //   sd(t0, Address(xthread, JavaThread::last_Java_pc_offset())) ->  sd
 1236   //   movptr(t1, addr, offset, t0)                                ->  lui + lui + slli + add
 1237   //   jalr(t1, offset)                                            ->  jalr
 1238   if (CodeCache::contains(_entry_point)) {
 1239     return 2 * NativeInstruction::instruction_size;
 1240   } else {
 1241     return 8 * NativeInstruction::instruction_size;
 1242   }
 1243 }
 1244 
 1245 //
 1246 // Compute padding required for nodes which need alignment
 1247 //
 1248 
 1249 // With RVC a call instruction may get 2-byte aligned.
 1250 // The address of the call instruction needs to be 4-byte aligned to
 1251 // ensure that it does not span a cache line so that it can be patched.
 1252 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
 1253 {
  // to make sure the address of the jal is 4-byte aligned.
 1255   return align_up(current_offset, alignment_required()) - current_offset;
 1256 }
 1257 
 1258 // With RVC a call instruction may get 2-byte aligned.
 1259 // The address of the call instruction needs to be 4-byte aligned to
 1260 // ensure that it does not span a cache line so that it can be patched.
 1261 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
 1262 {
 1263   // skip the movptr2 in MacroAssembler::ic_call():
 1264   // lui, lui, slli, add, addi
  // Though movptr2() is already 4-byte aligned with or without RVC,
  // we need to guard against further changes by explicitly accounting for its size.
  current_offset += NativeMovConstReg::movptr2_instruction_size;
  // to make sure the address of the jal is 4-byte aligned.
 1269   return align_up(current_offset, alignment_required()) - current_offset;
 1270 }
 1271 
 1272 //=============================================================================
 1273 
 1274 #ifndef PRODUCT
 1275 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1276   assert_cond(st != nullptr);
 1277   st->print("BREAKPOINT");
 1278 }
 1279 #endif
 1280 
 1281 void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1282   __ ebreak();
 1283 }
 1284 
 1285 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
 1286   return MachNode::size(ra_);
 1287 }
 1288 
 1289 //=============================================================================
 1290 
 1291 #ifndef PRODUCT
 1292   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
 1293     st->print("nop \t# %d bytes pad for loops and calls", _count);
 1294   }
 1295 #endif
 1296 
 1297   void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
 1298     Assembler::CompressibleRegion cr(masm); // nops shall be 2-byte under RVC for alignment purposes.
 1299     for (int i = 0; i < _count; i++) {
 1300       __ nop();
 1301     }
 1302   }
 1303 
 1304   uint MachNopNode::size(PhaseRegAlloc*) const {
 1305     return _count * (UseRVC ? NativeInstruction::compressed_instruction_size : NativeInstruction::instruction_size);
 1306   }
 1307 
 1308 //=============================================================================
 1309 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 1310 
 1311 int ConstantTable::calculate_table_base_offset() const {
 1312   return 0;  // absolute addressing, no offset
 1313 }
 1314 
 1315 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
 1316 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
 1317   ShouldNotReachHere();
 1318 }
 1319 
 1320 void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
 1321   // Empty encoding
 1322 }
 1323 
 1324 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
 1325   return 0;
 1326 }
 1327 
 1328 #ifndef PRODUCT
 1329 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
 1330   assert_cond(st != nullptr);
 1331   st->print("-- \t// MachConstantBaseNode (empty encoding)");
 1332 }
 1333 #endif
 1334 
 1335 #ifndef PRODUCT
 1336 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1337   assert_cond(st != nullptr && ra_ != nullptr);
 1338   Compile* C = ra_->C;
 1339 
 1340   int framesize = C->output()->frame_slots() << LogBytesPerInt;
 1341 
 1342   if (C->output()->need_stack_bang(framesize)) {
 1343     st->print("# stack bang size=%d\n\t", framesize);
 1344   }
 1345 
 1346   st->print("sd  fp, [sp, #%d]\n\t", - 2 * wordSize);
 1347   st->print("sd  ra, [sp, #%d]\n\t", - wordSize);
 1348   if (PreserveFramePointer) { st->print("sub  fp, sp, #%d\n\t", 2 * wordSize); }
 1349   st->print("sub sp, sp, #%d\n\t", framesize);
 1350 
 1351   if (C->stub_function() == nullptr) {
 1352     st->print("ld  t0, [guard]\n\t");
 1353     st->print("membar LoadLoad\n\t");
 1354     st->print("ld  t1, [xthread, #thread_disarmed_guard_value_offset]\n\t");
 1355     st->print("beq t0, t1, skip\n\t");
 1356     st->print("jalr #nmethod_entry_barrier_stub\n\t");
 1357     st->print("j skip\n\t");
 1358     st->print("guard: int\n\t");
 1359     st->print("skip:\n\t");
 1360   }
 1361 }
 1362 #endif
 1363 
 1364 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1365   assert_cond(ra_ != nullptr);
 1366   Compile* C = ra_->C;
 1367 
 1368   // n.b. frame size includes space for return pc and fp
 1369   const int framesize = C->output()->frame_size_in_bytes();
 1370 
 1371   // insert a nop at the start of the prolog so we can patch in a
 1372   // branch if we need to invalidate the method later
 1373   {
 1374     Assembler::IncompressibleRegion ir(masm);  // keep the nop as 4 bytes for patching.
 1375     MacroAssembler::assert_alignment(__ pc());
 1376     __ nop();  // 4 bytes
 1377   }
 1378 
 1379   assert_cond(C != nullptr);
 1380 
 1381   if (C->clinit_barrier_on_entry()) {
 1382     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
 1383 
 1384     Label L_skip_barrier;
 1385 
 1386     __ mov_metadata(t1, C->method()->holder()->constant_encoding());
 1387     __ clinit_barrier(t1, t0, &L_skip_barrier);
 1388     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 1389     __ bind(L_skip_barrier);
 1390   }
 1391 
 1392   int bangsize = C->output()->bang_size_in_bytes();
 1393   if (C->output()->need_stack_bang(bangsize)) {
 1394     __ generate_stack_overflow_check(bangsize);
 1395   }
 1396 
 1397   __ build_frame(framesize);
 1398 
 1399   if (C->stub_function() == nullptr) {
 1400     BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
 1401     // Dummy labels for just measuring the code size
 1402     Label dummy_slow_path;
 1403     Label dummy_continuation;
 1404     Label dummy_guard;
 1405     Label* slow_path = &dummy_slow_path;
 1406     Label* continuation = &dummy_continuation;
 1407     Label* guard = &dummy_guard;
 1408     if (!Compile::current()->output()->in_scratch_emit_size()) {
 1409       // Use real labels from actual stub when not emitting code for purpose of measuring its size
 1410       C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
 1411       Compile::current()->output()->add_stub(stub);
 1412       slow_path = &stub->entry();
 1413       continuation = &stub->continuation();
 1414       guard = &stub->guard();
 1415     }
 1416     // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
 1417     bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
 1418   }
 1419 
 1420   if (VerifyStackAtCalls) {
 1421     Unimplemented();
 1422   }
 1423 
 1424   C->output()->set_frame_complete(__ offset());
 1425 
 1426   if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because constant table users
    // might be emitted before MachConstantBaseNode.
 1429     ConstantTable& constant_table = C->output()->constant_table();
 1430     constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
 1431   }
 1432 }
 1433 
 1434 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
 1435 {
 1436   assert_cond(ra_ != nullptr);
 1437   return MachNode::size(ra_); // too many variables; just compute it
 1438                               // the hard way
 1439 }
 1440 
 1441 int MachPrologNode::reloc() const
 1442 {
 1443   return 0;
 1444 }
 1445 
 1446 //=============================================================================
 1447 
 1448 #ifndef PRODUCT
 1449 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1450   assert_cond(st != nullptr && ra_ != nullptr);
 1451   Compile* C = ra_->C;
 1452   assert_cond(C != nullptr);
 1453   int framesize = C->output()->frame_size_in_bytes();
 1454 
 1455   st->print("# pop frame %d\n\t", framesize);
 1456 
 1457   if (framesize == 0) {
 1458     st->print("ld  ra, [sp,#%d]\n\t", (2 * wordSize));
 1459     st->print("ld  fp, [sp,#%d]\n\t", (3 * wordSize));
 1460     st->print("add sp, sp, #%d\n\t", (2 * wordSize));
 1461   } else {
 1462     st->print("add  sp, sp, #%d\n\t", framesize);
 1463     st->print("ld  ra, [sp,#%d]\n\t", - 2 * wordSize);
 1464     st->print("ld  fp, [sp,#%d]\n\t", - wordSize);
 1465   }
 1466 
 1467   if (do_polling() && C->is_method_compilation()) {
 1468     st->print("# test polling word\n\t");
 1469     st->print("ld t0, [xthread,#%d]\n\t", in_bytes(JavaThread::polling_word_offset()));
 1470     st->print("bgtu sp, t0, #slow_path");
 1471   }
 1472 }
 1473 #endif
 1474 
 1475 void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1476   assert_cond(ra_ != nullptr);
 1477   Compile* C = ra_->C;
 1478   assert_cond(C != nullptr);
 1479   int framesize = C->output()->frame_size_in_bytes();
 1480 
 1481   __ remove_frame(framesize);
 1482 
 1483   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
 1484     __ reserved_stack_check();
 1485   }
 1486 
 1487   if (do_polling() && C->is_method_compilation()) {
 1488     Label dummy_label;
 1489     Label* code_stub = &dummy_label;
 1490     if (!C->output()->in_scratch_emit_size()) {
 1491       C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
 1492       C->output()->add_stub(stub);
 1493       code_stub = &stub->entry();
 1494     }
 1495     __ relocate(relocInfo::poll_return_type);
 1496     __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
 1497   }
 1498 }
 1499 
 1500 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
 1501   assert_cond(ra_ != nullptr);
 1502   // Variable size. Determine dynamically.
 1503   return MachNode::size(ra_);
 1504 }
 1505 
 1506 int MachEpilogNode::reloc() const {
 1507   // Return number of relocatable values contained in this instruction.
 1508   return 1; // 1 for polling page.
 1509 }
 1510 const Pipeline * MachEpilogNode::pipeline() const {
 1511   return MachNode::pipeline_class();
 1512 }
 1513 
 1514 //=============================================================================
 1515 
// Figure out which register class each register belongs in: rc_int, rc_float,
// rc_vector or rc_stack.
 1518 enum RC { rc_bad, rc_int, rc_float, rc_vector, rc_stack };
 1519 
 1520 static enum RC rc_class(OptoReg::Name reg) {
 1521 
 1522   if (reg == OptoReg::Bad) {
 1523     return rc_bad;
 1524   }
 1525 
 1526   // we have 30 int registers * 2 halves
 1527   // (t0 and t1 are omitted)
 1528   int slots_of_int_registers = Register::max_slots_per_register * (Register::number_of_registers - 2);
 1529   if (reg < slots_of_int_registers) {
 1530     return rc_int;
 1531   }
 1532 
  // we have 32 float registers * 2 halves
 1534   int slots_of_float_registers = FloatRegister::max_slots_per_register * FloatRegister::number_of_registers;
 1535   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1536     return rc_float;
 1537   }
 1538 
  // we have 32 vector registers * 4 slots each
 1540   int slots_of_vector_registers = VectorRegister::max_slots_per_register * VectorRegister::number_of_registers;
 1541   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) {
 1542     return rc_vector;
 1543   }
 1544 
 1545   // Between vector regs & stack is the flags regs.
 1546   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1547 
 1548   return rc_stack;
 1549 }
 1550 
 1551 uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
 1552   assert_cond(ra_ != nullptr);
 1553   Compile* C = ra_->C;
 1554 
 1555   // Get registers to move.
 1556   OptoReg::Name src_hi = ra_->get_reg_second(in(1));
 1557   OptoReg::Name src_lo = ra_->get_reg_first(in(1));
 1558   OptoReg::Name dst_hi = ra_->get_reg_second(this);
 1559   OptoReg::Name dst_lo = ra_->get_reg_first(this);
 1560 
 1561   enum RC src_hi_rc = rc_class(src_hi);
 1562   enum RC src_lo_rc = rc_class(src_lo);
 1563   enum RC dst_hi_rc = rc_class(dst_hi);
 1564   enum RC dst_lo_rc = rc_class(dst_lo);
 1565 
 1566   assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
 1567 
 1568   if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
 1569     assert((src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1570            (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi,
 1571            "expected aligned-adjacent pairs");
 1572   }
 1573 
 1574   if (src_lo == dst_lo && src_hi == dst_hi) {
 1575     return 0;            // Self copy, no move.
 1576   }
 1577 
 1578   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
 1579               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
 1580   int src_offset = ra_->reg2offset(src_lo);
 1581   int dst_offset = ra_->reg2offset(dst_lo);
 1582 
 1583   if (bottom_type()->isa_vect() != nullptr) {
 1584     uint ireg = ideal_reg();
 1585     if (ireg == Op_VecA && masm) {
 1586       int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
 1587       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1588         // stack to stack
 1589         __ spill_copy_vector_stack_to_stack(src_offset, dst_offset,
 1590                                             vector_reg_size_in_bytes);
 1591       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
 1592         // vpr to stack
 1593         __ spill(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
 1594       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
 1595         // stack to vpr
 1596         __ unspill(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
 1597       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
 1598         // vpr to vpr
 1599         __ vsetvli_helper(T_BYTE, MaxVectorSize);
 1600         __ vmv_v_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
 1601       } else {
 1602         ShouldNotReachHere();
 1603       }
 1604     } else if (bottom_type()->isa_vectmask() && masm) {
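      // Each register-allocator slot is 32 bits, hence the * 32 / 8 conversion to bytes below.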
 1605       int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8;
 1606       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
 1607         // stack to stack
 1608         __ spill_copy_vmask_stack_to_stack(src_offset, dst_offset,
 1609                                            vmask_size_in_bytes);
 1610       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
 1611         // vmask to stack
 1612         __ spill_vmask(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
 1613       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
 1614         // stack to vmask
 1615         __ unspill_vmask(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
 1616       } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
 1617         // vmask to vmask
 1618         __ vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
 1619         __ vmv_v_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
 1620       } else {
 1621         ShouldNotReachHere();
 1622       }
 1623     }
 1624   } else if (masm != nullptr) {
 1625     switch (src_lo_rc) {
 1626       case rc_int:
 1627         if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
 1628           if (!is64 && this->ideal_reg() != Op_RegI) { // zero extended for narrow oop or klass
 1629             __ zext(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]), 32);
 1630           } else {
 1631             __ mv(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]));
 1632           }
 1633         } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
 1634           if (is64) {
 1635             __ fmv_d_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1636                        as_Register(Matcher::_regEncode[src_lo]));
 1637           } else {
 1638             __ fmv_w_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1639                        as_Register(Matcher::_regEncode[src_lo]));
 1640           }
 1641         } else {                    // gpr --> stack spill
 1642           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1643           __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
 1644         }
 1645         break;
 1646       case rc_float:
 1647         if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
 1648           if (is64) {
 1649             __ fmv_x_d(as_Register(Matcher::_regEncode[dst_lo]),
 1650                        as_FloatRegister(Matcher::_regEncode[src_lo]));
 1651           } else {
 1652             __ fmv_x_w(as_Register(Matcher::_regEncode[dst_lo]),
 1653                        as_FloatRegister(Matcher::_regEncode[src_lo]));
 1654           }
 1655         } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
 1656           if (is64) {
 1657             __ fmv_d(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1658                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1659           } else {
 1660             __ fmv_s(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1661                      as_FloatRegister(Matcher::_regEncode[src_lo]));
 1662           }
 1663         } else {                    // fpr --> stack spill
 1664           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1665           __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
 1666                    is64, dst_offset);
 1667         }
 1668         break;
 1669       case rc_stack:
 1670         if (dst_lo_rc == rc_int) {  // stack --> gpr load
 1671           if (this->ideal_reg() == Op_RegI) {
 1672             __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
          } else { // zero extended for narrow oop or klass
 1674             __ unspillu(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
 1675           }
 1676         } else if (dst_lo_rc == rc_float) { // stack --> fpr load
 1677           __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
 1678                      is64, src_offset);
 1679         } else {                    // stack --> stack copy
 1680           assert(dst_lo_rc == rc_stack, "spill to bad register class");
 1681           if (this->ideal_reg() == Op_RegI) {
 1682             __ unspill(t0, is64, src_offset);
 1683           } else { // zero extended for narrow oop or klass
 1684             __ unspillu(t0, is64, src_offset);
 1685           }
 1686           __ spill(t0, is64, dst_offset);
 1687         }
 1688         break;
 1689       default:
 1690         ShouldNotReachHere();
 1691     }
 1692   }
 1693 
 1694   if (st != nullptr) {
 1695     st->print("spill ");
 1696     if (src_lo_rc == rc_stack) {
 1697       st->print("[sp, #%d] -> ", src_offset);
 1698     } else {
 1699       st->print("%s -> ", Matcher::regName[src_lo]);
 1700     }
 1701     if (dst_lo_rc == rc_stack) {
 1702       st->print("[sp, #%d]", dst_offset);
 1703     } else {
 1704       st->print("%s", Matcher::regName[dst_lo]);
 1705     }
 1706     if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
 1707       int vsize = 0;
 1708       if (ideal_reg() == Op_VecA) {
 1709         vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
 1710       } else {
 1711         ShouldNotReachHere();
 1712       }
 1713       st->print("\t# vector spill size = %d", vsize);
 1714     } else if (ideal_reg() == Op_RegVectMask) {
 1715       assert(Matcher::supports_scalable_vector(), "bad register type for spill");
 1716       int vsize = Matcher::scalable_predicate_reg_slots() * 32;
 1717       st->print("\t# vmask spill size = %d", vsize);
 1718     } else {
 1719       st->print("\t# spill size = %d", is64 ? 64 : 32);
 1720     }
 1721   }
 1722 
 1723   return 0;
 1724 }
 1725 
 1726 #ifndef PRODUCT
 1727 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1728   if (ra_ == nullptr) {
 1729     st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
 1730   } else {
 1731     implementation(nullptr, ra_, false, st);
 1732   }
 1733 }
 1734 #endif
 1735 
 1736 void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1737   implementation(masm, ra_, false, nullptr);
 1738 }
 1739 
 1740 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
 1741   return MachNode::size(ra_);
 1742 }
 1743 
 1744 //=============================================================================
 1745 
 1746 #ifndef PRODUCT
 1747 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 1748   assert_cond(ra_ != nullptr && st != nullptr);
 1749   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1750   int reg = ra_->get_reg_first(this);
 1751   st->print("add %s, sp, #%d\t# box lock",
 1752             Matcher::regName[reg], offset);
 1753 }
 1754 #endif
 1755 
 1756 void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
 1757   Assembler::IncompressibleRegion ir(masm);  // Fixed length: see BoxLockNode::size()
 1758 
 1759   assert_cond(ra_ != nullptr);
 1760   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1761   int reg    = ra_->get_encode(this);
 1762 
 1763   if (Assembler::is_simm12(offset)) {
 1764     __ addi(as_Register(reg), sp, offset);
 1765   } else {
 1766     __ li32(t0, offset);
 1767     __ add(as_Register(reg), sp, t0);
 1768   }
 1769 }
 1770 
 1771 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // The required size is known statically from the stack offset, so compute it
  // directly here instead of going through MachNode::size(ra_).
 1773   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 1774 
 1775   if (Assembler::is_simm12(offset)) {
 1776     return NativeInstruction::instruction_size;
 1777   } else {
 1778     return 3 * NativeInstruction::instruction_size; // lui + addiw + add;
 1779   }
 1780 }
 1781 
 1782 //=============================================================================
 1783 
 1784 #ifndef PRODUCT
 1785 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 1786 {
 1787   assert_cond(st != nullptr);
 1788   st->print_cr("# MachUEPNode");
 1789   if (UseCompressedClassPointers) {
 1790     st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 1791     st->print_cr("\tlwu t2, [t0      + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 1792   } else {
    st->print_cr("\tld t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
    st->print_cr("\tld t2, [t0      + CompiledICData::speculated_klass_offset()]\t# klass");
 1795   }
 1796   st->print_cr("\tbeq t1, t2, ic_hit");
  st->print_cr("\tj SharedRuntime::_ic_miss_stub\t # Inline cache check");
 1798   st->print_cr("\tic_hit:");
 1799 }
 1800 #endif
 1801 
 1802 void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
 1803 {
 1804   // This is the unverified entry point.
 1805   __ ic_check(CodeEntryAlignment);
 1806 
 1807   // Verified entry point must be properly 4 bytes aligned for patching by NativeJump::patch_verified_entry().
 1808   // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
 1809   assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
 1810 }
 1811 
 1812 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
 1813 {
 1814   assert_cond(ra_ != nullptr);
 1815   return MachNode::size(ra_);
 1816 }
 1817 
 1818 // REQUIRED EMIT CODE
 1819 
 1820 //=============================================================================
 1821 
 1822 // Emit exception handler code.
 1823 int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
 1824 {
 1825   // auipc t1, #exception_blob_entry_point
 1826   // jr (offset)t1
 1827   // Note that the code buffer's insts_mark is always relative to insts.
 1828   // That's why we must use the macroassembler to generate a handler.
 1829   address base = __ start_a_stub(size_exception_handler());
 1830   if (base == nullptr) {
 1831     ciEnv::current()->record_failure("CodeCache is full");
 1832     return 0;  // CodeBuffer::expand failed
 1833   }
 1834   int offset = __ offset();
 1835   __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
 1836   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
 1837   __ end_a_stub();
 1838   return offset;
 1839 }
 1840 
 1841 // Emit deopt handler code.
 1842 int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
 1843 {
 1844   address base = __ start_a_stub(size_deopt_handler());
 1845   if (base == nullptr) {
 1846     ciEnv::current()->record_failure("CodeCache is full");
 1847     return 0;  // CodeBuffer::expand failed
 1848   }
 1849   int offset = __ offset();
 1850 
 1851   __ auipc(ra, 0);
 1852   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 1853 
 1854   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
 1855   __ end_a_stub();
  return offset;
}

// REQUIRED MATCHER CODE
 1860 
 1861 //=============================================================================
 1862 
 1863 bool Matcher::match_rule_supported(int opcode) {
 1864   if (!has_match_rule(opcode)) {
 1865     return false;
 1866   }
 1867 
 1868   switch (opcode) {
 1869     case Op_OnSpinWait:
 1870       return VM_Version::supports_on_spin_wait();
 1871     case Op_CacheWB:           // fall through
 1872     case Op_CacheWBPreSync:    // fall through
 1873     case Op_CacheWBPostSync:
 1874       if (!VM_Version::supports_data_cache_line_flush()) {
 1875         return false;
 1876       }
 1877       break;
 1878 
 1879     case Op_StrCompressedCopy: // fall through
 1880     case Op_StrInflatedCopy:   // fall through
 1881     case Op_CountPositives:    // fall through
 1882     case Op_EncodeISOArray:
 1883       return UseRVV;
 1884 
    // Current tests show a performance gain when MaxVectorSize >= 32, but a regression
    // when MaxVectorSize == 16. So only enable the intrinsic when MaxVectorSize >= 32.
 1887     case Op_RoundVF:
 1888       return UseRVV && MaxVectorSize >= 32;
 1889 
    // For double, current tests still show some regression even with MaxVectorSize == 32.
    // Although there is no hardware to verify it yet, the trend of the performance data on
    // hardware (with vlenb == 16 and 32 respectively) suggests a gain rather than a regression
    // for double once MaxVectorSize >= 64. So only enable the intrinsic when MaxVectorSize >= 64.
 1894     case Op_RoundVD:
 1895       return UseRVV && MaxVectorSize >= 64;
 1896 
 1897     case Op_PopCountI:
 1898     case Op_PopCountL:
 1899       return UsePopCountInstruction;
 1900 
 1901     case Op_ReverseI:
 1902     case Op_ReverseL:
 1903       return UseZbkb;
 1904 
 1905     case Op_ReverseBytesI:
 1906     case Op_ReverseBytesL:
 1907     case Op_ReverseBytesS:
 1908     case Op_ReverseBytesUS:
 1909     case Op_RotateRight:
 1910     case Op_RotateLeft:
 1911     case Op_CountLeadingZerosI:
 1912     case Op_CountLeadingZerosL:
 1913     case Op_CountTrailingZerosI:
 1914     case Op_CountTrailingZerosL:
 1915       return UseZbb;
 1916 
 1917     case Op_FmaF:
 1918     case Op_FmaD:
 1919     case Op_FmaVF:
 1920     case Op_FmaVD:
 1921       return UseFMA;
 1922 
 1923     case Op_ConvHF2F:
 1924     case Op_ConvF2HF:
 1925       return VM_Version::supports_float16_float_conversion();
 1926     case Op_ReinterpretS2HF:
 1927     case Op_ReinterpretHF2S:
 1928       return UseZfh || UseZfhmin;
 1929     case Op_AddHF:
 1930     case Op_DivHF:
 1931     case Op_FmaHF:
 1932     case Op_MaxHF:
 1933     case Op_MinHF:
 1934     case Op_MulHF:
 1935     case Op_SubHF:
 1936     case Op_SqrtHF:
 1937       return UseZfh;
 1938 
 1939     case Op_CMoveF:
 1940     case Op_CMoveD:
 1941     case Op_CMoveP:
 1942     case Op_CMoveN:
 1943       return false;
 1944   }
 1945 
 1946   return true; // Per default match rules are supported.
 1947 }
 1948 
 1949 const RegMask* Matcher::predicate_reg_mask(void) {
 1950   return &_VMASK_REG_mask;
 1951 }
 1952 
// Vector calling convention support is tied to the Vector API support (EnableVectorSupport).
 1954 bool Matcher::supports_vector_calling_convention(void) {
 1955   return EnableVectorSupport;
 1956 }
 1957 
 1958 OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
 1959   assert(EnableVectorSupport, "sanity");
 1960   assert(ideal_reg == Op_VecA, "sanity");
  // see https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc for more details
 1962   int lo = V8_num;
 1963   int hi = V8_K_num;
 1964   return OptoRegPair(hi, lo);
 1965 }
 1966 
 1967 // Is this branch offset short enough that a short branch can be used?
 1968 //
 1969 // NOTE: If the platform does not provide any short branch variants, then
 1970 //       this method should return false for offset 0.
 1971 // |---label(L1)-----|
 1972 // |-----------------|
 1973 // |-----------------|----------eq: float-------------------
 1974 // |-----------------| // far_cmpD_branch   |   cmpD_branch
 1975 // |------- ---------|    feq;              |      feq;
 1976 // |-far_cmpD_branch-|    beqz done;        |      bnez L;
 1977 // |-----------------|    j L;              |
 1978 // |-----------------|    bind(done);       |
 1979 // |-----------------|--------------------------------------
 1980 // |-----------------| // so shortBrSize = br_size - 4;
 1981 // |-----------------| // so offs = offset - shortBrSize + 4;
 1982 // |---label(L2)-----|
 1983 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 1984   // The passed offset is relative to address of the branch.
 1985   int shortBrSize = br_size - 4;
 1986   int offs = offset - shortBrSize + 4;
 1987   return (-4096 <= offs && offs < 4096);
 1988 }
 1989 
 1990 // Vector width in bytes.
 1991 int Matcher::vector_width_in_bytes(BasicType bt) {
 1992   if (UseRVV) {
    // MaxVectorSize should already have been set by detecting the RVV max vector register size
    // when UseRVV was checked: MaxVectorSize == VM_Version::_initial_vector_length
 1995     int size = MaxVectorSize;
 1996     // Minimum 2 values in vector
 1997     if (size < 2 * type2aelembytes(bt)) size = 0;
 1998     // But never < 4
 1999     if (size < 4) size = 0;
 2000     return size;
 2001   }
 2002   return 0;
 2003 }
 2004 
 2005 // Limits on vector size (number of elements) loaded into vector.
 2006 int Matcher::max_vector_size(const BasicType bt) {
 2007   return vector_width_in_bytes(bt) / type2aelembytes(bt);
 2008 }
 2009 
 2010 int Matcher::min_vector_size(const BasicType bt) {
 2011   int max_size = max_vector_size(bt);
 2012   // Limit the min vector size to 8 bytes.
 2013   int size = 8 / type2aelembytes(bt);
 2014   if (bt == T_BYTE) {
 2015     // To support vector api shuffle/rearrange.
 2016     size = 4;
 2017   } else if (bt == T_BOOLEAN) {
 2018     // To support vector api load/store mask.
 2019     size = 2;
 2020   }
 2021   if (size < 2) size = 2;
 2022   return MIN2(size, max_size);
 2023 }
 2024 
 2025 int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
 2026   return Matcher::max_vector_size(bt);
 2027 }
 2028 
 2029 // Vector ideal reg.
 2030 uint Matcher::vector_ideal_reg(int len) {
 2031   assert(MaxVectorSize >= len, "");
 2032   if (UseRVV) {
 2033     return Op_VecA;
 2034   }
 2035 
 2036   ShouldNotReachHere();
 2037   return 0;
 2038 }
 2039 
 2040 int Matcher::scalable_vector_reg_size(const BasicType bt) {
 2041   return Matcher::max_vector_size(bt);
 2042 }
 2043 
 2044 MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
 2045   ShouldNotReachHere(); // generic vector operands not supported
 2046   return nullptr;
 2047 }
 2048 
 2049 bool Matcher::is_reg2reg_move(MachNode* m) {
 2050   ShouldNotReachHere(); // generic vector operands not supported
 2051   return false;
 2052 }
 2053 
 2054 bool Matcher::is_generic_vector(MachOper* opnd) {
 2055   ShouldNotReachHere(); // generic vector operands not supported
 2056   return false;
 2057 }
 2058 
 2059 // Return whether or not this register is ever used as an argument.
 2060 // This function is used on startup to build the trampoline stubs in
 2061 // generateOptoStub.  Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers will not be
// available to the callee.
 2064 bool Matcher::can_be_java_arg(int reg)
 2065 {
 2066   return
 2067     reg ==  R10_num || reg == R10_H_num ||
 2068     reg ==  R11_num || reg == R11_H_num ||
 2069     reg ==  R12_num || reg == R12_H_num ||
 2070     reg ==  R13_num || reg == R13_H_num ||
 2071     reg ==  R14_num || reg == R14_H_num ||
 2072     reg ==  R15_num || reg == R15_H_num ||
 2073     reg ==  R16_num || reg == R16_H_num ||
 2074     reg ==  R17_num || reg == R17_H_num ||
 2075     reg ==  F10_num || reg == F10_H_num ||
 2076     reg ==  F11_num || reg == F11_H_num ||
 2077     reg ==  F12_num || reg == F12_H_num ||
 2078     reg ==  F13_num || reg == F13_H_num ||
 2079     reg ==  F14_num || reg == F14_H_num ||
 2080     reg ==  F15_num || reg == F15_H_num ||
 2081     reg ==  F16_num || reg == F16_H_num ||
 2082     reg ==  F17_num || reg == F17_H_num;
 2083 }
 2084 
 2085 bool Matcher::is_spillable_arg(int reg)
 2086 {
 2087   return can_be_java_arg(reg);
 2088 }
 2089 
 2090 uint Matcher::int_pressure_limit()
 2091 {
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics (Spill-USE) explicitly skip
  // derived pointers and eventually fail to spill after reaching the
  // maximum number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
 2099   uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
 2100   if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, the frame pointer is allocatable,
    // but unlike other SOC registers it is excluded from fatproj's mask
    // because its save type is No-Save. Decrease by 1 to ensure high
    // pressure at fatproj when PreserveFramePointer is off.
 2105     // See check_pressure_at_fatproj().
 2106     default_int_pressure_threshold--;
 2107   }
 2108   return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
 2109 }
 2110 
 2111 uint Matcher::float_pressure_limit()
 2112 {
 2113   // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
 2114   return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
 2115 }
 2116 
 2117 bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
 2118   return false;
 2119 }
 2120 
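// Register for DIVI projection of divmodI.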
 2121 RegMask Matcher::divI_proj_mask() {
 2122   ShouldNotReachHere();
 2123   return RegMask();
 2124 }
 2125 
 2126 // Register for MODI projection of divmodI.
 2127 RegMask Matcher::modI_proj_mask() {
 2128   ShouldNotReachHere();
 2129   return RegMask();
 2130 }
 2131 
 2132 // Register for DIVL projection of divmodL.
 2133 RegMask Matcher::divL_proj_mask() {
 2134   ShouldNotReachHere();
 2135   return RegMask();
 2136 }
 2137 
 2138 // Register for MODL projection of divmodL.
 2139 RegMask Matcher::modL_proj_mask() {
 2140   ShouldNotReachHere();
 2141   return RegMask();
 2142 }
 2143 
 2144 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
 2145   return FP_REG_mask();
 2146 }
 2147 
 2148 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2149   assert_cond(addp != nullptr);
 2150   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2151     Node* u = addp->fast_out(i);
 2152     if (u != nullptr && u->is_Mem()) {
      int opsize = u->as_Mem()->memory_size();
      assert(opsize > 0, "unexpected memory operand size");
      if (opsize != (1 << shift)) {
 2156         return false;
 2157       }
 2158     }
 2159   }
 2160   return true;
 2161 }
 2162 
 2163 // Binary src (Replicate scalar/immediate)
 2164 static bool is_vector_scalar_bitwise_pattern(Node* n, Node* m) {
 2165   if (n == nullptr || m == nullptr) {
 2166     return false;
 2167   }
 2168 
 2169   if (m->Opcode() != Op_Replicate) {
 2170     return false;
 2171   }
 2172 
 2173   switch (n->Opcode()) {
 2174     case Op_AndV:
 2175     case Op_OrV:
 2176     case Op_XorV:
 2177     case Op_AddVB:
 2178     case Op_AddVS:
 2179     case Op_AddVI:
 2180     case Op_AddVL:
 2181     case Op_SubVB:
 2182     case Op_SubVS:
 2183     case Op_SubVI:
 2184     case Op_SubVL:
 2185     case Op_MulVB:
 2186     case Op_MulVS:
 2187     case Op_MulVI:
 2188     case Op_MulVL: {
 2189       return true;
 2190     }
 2191     default:
 2192       return false;
 2193   }
 2194 }
 2195 
 2196 // (XorV src (Replicate m1))
 2197 // (XorVMask src (MaskAll m1))
 2198 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2199   if (n != nullptr && m != nullptr) {
 2200     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2201            VectorNode::is_all_ones_vector(m);
 2202   }
 2203   return false;
 2204 }
 2205 
 2206 // Should the Matcher clone input 'm' of node 'n'?
 2207 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2208   assert_cond(m != nullptr);
 2209   if (is_vshift_con_pattern(n, m) || // ShiftV src (ShiftCntV con)
 2210       is_vector_bitwise_not_pattern(n, m) ||
 2211       is_vector_scalar_bitwise_pattern(n, m) ||
 2212       is_encode_and_store_pattern(n, m)) {
 2213     mstack.push(m, Visit);
 2214     return true;
 2215   }
 2216   return false;
 2217 }
 2218 
 2219 // Should the Matcher clone shifts on addressing modes, expecting them
 2220 // to be subsumed into complex addressing expressions or compute them
 2221 // into registers?
 2222 bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
 2223   return clone_base_plus_offset_address(m, mstack, address_visited);
 2224 }
 2225 
 2226 %}
 2227 
 2228 
 2229 
 2230 //----------ENCODING BLOCK-----------------------------------------------------
 2231 // This block specifies the encoding classes used by the compiler to
 2232 // output byte streams.  Encoding classes are parameterized macros
 2233 // used by Machine Instruction Nodes in order to generate the bit
 2234 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are currently
// supported: REG_INTER, CONST_INTER, MEMORY_INTER, &
 2237 // COND_INTER.  REG_INTER causes an operand to generate a function
 2238 // which returns its register number when queried.  CONST_INTER causes
 2239 // an operand to generate a function which returns the value of the
 2240 // constant when queried.  MEMORY_INTER causes an operand to generate
 2241 // four functions which return the Base Register, the Index Register,
 2242 // the Scale Value, and the Offset Value of the operand when queried.
 2243 // COND_INTER causes an operand to generate six functions which return
 2244 // the encoding code (ie - encoding bits for the instruction)
 2245 // associated with each basic boolean condition for a conditional
 2246 // instruction.
 2247 //
 2248 // Instructions specify two basic values for encoding.  Again, a
 2249 // function is available to check if the constant displacement is an
 2250 // oop. They use the ins_encode keyword to specify their encoding
 2251 // classes (which must be a sequence of enc_class names, and their
 2252 // parameters, specified in the encoding block), and they use the
 2253 // opcode keyword to specify, in order, their primary, secondary, and
 2254 // tertiary opcode.  Only the opcode sections which a particular
 2255 // instruction needs for encoding need to be specified.
 2256 encode %{
 2257   // BEGIN Non-volatile memory access
 2258 
 2259   enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
 2260     int64_t con = (int64_t)$src$$constant;
 2261     Register dst_reg = as_Register($dst$$reg);
 2262     __ mv(dst_reg, con);
 2263   %}
 2264 
 2265   enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
 2266     Register dst_reg = as_Register($dst$$reg);
 2267     address con = (address)$src$$constant;
 2268     if (con == nullptr || con == (address)1) {
 2269       ShouldNotReachHere();
 2270     } else {
 2271       relocInfo::relocType rtype = $src->constant_reloc();
 2272       if (rtype == relocInfo::oop_type) {
 2273         __ movoop(dst_reg, (jobject)con);
 2274       } else if (rtype == relocInfo::metadata_type) {
 2275         __ mov_metadata(dst_reg, (Metadata*)con);
 2276       } else {
 2277         assert(rtype == relocInfo::none, "unexpected reloc type");
 2278         __ mv(dst_reg, $src$$constant);
 2279       }
 2280     }
 2281   %}
 2282 
 2283   enc_class riscv_enc_mov_p1(iRegP dst) %{
 2284     Register dst_reg = as_Register($dst$$reg);
 2285     __ mv(dst_reg, 1);
 2286   %}
 2287 
 2288   enc_class riscv_enc_mov_byte_map_base(iRegP dst) %{
 2289     __ load_byte_map_base($dst$$Register);
 2290   %}
 2291 
 2292   enc_class riscv_enc_mov_n(iRegN dst, immN src) %{
 2293     Register dst_reg = as_Register($dst$$reg);
 2294     address con = (address)$src$$constant;
 2295     if (con == nullptr) {
 2296       ShouldNotReachHere();
 2297     } else {
 2298       relocInfo::relocType rtype = $src->constant_reloc();
 2299       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
 2300       __ set_narrow_oop(dst_reg, (jobject)con);
 2301     }
 2302   %}
 2303 
 2304   enc_class riscv_enc_mov_zero(iRegNorP dst) %{
 2305     Register dst_reg = as_Register($dst$$reg);
 2306     __ mv(dst_reg, zr);
 2307   %}
 2308 
 2309   enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{
 2310     Register dst_reg = as_Register($dst$$reg);
 2311     address con = (address)$src$$constant;
 2312     if (con == nullptr) {
 2313       ShouldNotReachHere();
 2314     } else {
 2315       relocInfo::relocType rtype = $src->constant_reloc();
 2316       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
 2317       __ set_narrow_klass(dst_reg, (Klass *)con);
 2318     }
 2319   %}
 2320 
 2321   enc_class riscv_enc_cmpxchgw(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2322     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 2323                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2324                /*result as bool*/ true);
 2325   %}
 2326 
 2327   enc_class riscv_enc_cmpxchgn(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2328     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 2329                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2330                /*result as bool*/ true);
 2331   %}
 2332 
 2333   enc_class riscv_enc_cmpxchg(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
 2334     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 2335                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 2336                /*result as bool*/ true);
 2337   %}
 2338 
 2339   enc_class riscv_enc_cmpxchgw_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2340     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 2341                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2342                /*result as bool*/ true);
 2343   %}
 2344 
 2345   enc_class riscv_enc_cmpxchgn_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
 2346     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 2347                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2348                /*result as bool*/ true);
 2349   %}
 2350 
 2351   enc_class riscv_enc_cmpxchg_acq(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
 2352     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 2353                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 2354                /*result as bool*/ true);
 2355   %}
 2356 
 2357   // compare and branch instruction encodings
 2358 
 2359   enc_class riscv_enc_j(label lbl) %{
 2360     Label* L = $lbl$$label;
 2361     __ j(*L);
 2362   %}
 2363 
 2364   enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
 2365     Label* L = $lbl$$label;
 2366     switch ($cmp$$cmpcode) {
 2367       case(BoolTest::ge):
 2368         __ j(*L);
 2369         break;
 2370       case(BoolTest::lt):
 2371         break;
 2372       default:
 2373         Unimplemented();
 2374     }
 2375   %}
 2376 
 2377   // call instruction encodings
 2378 
 2379   enc_class riscv_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result) %{
 2380     Register sub_reg = as_Register($sub$$reg);
 2381     Register super_reg = as_Register($super$$reg);
 2382     Register temp_reg = as_Register($temp$$reg);
 2383     Register result_reg = as_Register($result$$reg);
 2384     Register cr_reg = t1;
 2385 
 2386     Label miss;
 2387     Label done;
 2388     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
 2389                                      nullptr, &miss, /*set_cond_codes*/ true);
 2390     if ($primary) {
 2391       __ mv(result_reg, zr);
 2392     } else {
 2393       __ mv(cr_reg, zr);
 2394       __ j(done);
 2395     }
 2396 
 2397     __ bind(miss);
 2398     if (!$primary) {
 2399       __ mv(cr_reg, 1);
 2400     }
 2401 
 2402     __ bind(done);
 2403   %}
 2404 
 2405   enc_class riscv_enc_java_static_call(method meth) %{
 2406     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2407 
 2408     address addr = (address)$meth$$method;
 2409     address call = nullptr;
 2410     assert_cond(addr != nullptr);
 2411     if (!_method) {
 2412       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
 2413       call = __ reloc_call(Address(addr, relocInfo::runtime_call_type));
 2414       if (call == nullptr) {
 2415         ciEnv::current()->record_failure("CodeCache is full");
 2416         return;
 2417       }
 2418     } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
 2419       // The NOP here is purely to ensure that eliding a call to
 2420       // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
 2421       __ nop();
 2422       __ nop();
 2423       __ nop();
 2424       __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
 2425     } else {
 2426       int method_index = resolved_method_index(masm);
 2427       RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
 2428                                                   : static_call_Relocation::spec(method_index);
 2429       call = __ reloc_call(Address(addr, rspec));
 2430       if (call == nullptr) {
 2431         ciEnv::current()->record_failure("CodeCache is full");
 2432         return;
 2433       }
 2434 
 2435       if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
 2436         // Calls of the same statically bound method can share
 2437         // a stub to the interpreter.
 2438         __ code()->shared_stub_to_interp_for(_method, call - (__ begin()));
 2439       } else {
 2440         // Emit stub for static call
 2441         address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
 2442         if (stub == nullptr) {
 2443           ciEnv::current()->record_failure("CodeCache is full");
 2444           return;
 2445         }
 2446       }
 2447     }
 2448 
 2449     __ post_call_nop();
 2450   %}
 2451 
 2452   enc_class riscv_enc_java_dynamic_call(method meth) %{
 2453     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2454     int method_index = resolved_method_index(masm);
 2455     address call = __ ic_call((address)$meth$$method, method_index);
 2456     if (call == nullptr) {
 2457       ciEnv::current()->record_failure("CodeCache is full");
 2458       return;
 2459     }
 2460 
 2461     __ post_call_nop();
 2462   %}
 2463 
 2464   enc_class riscv_enc_call_epilog() %{
 2465     if (VerifyStackAtCalls) {
 2466       // Check that stack depth is unchanged: find majik cookie on stack
 2467       __ call_Unimplemented();
 2468     }
 2469   %}
 2470 
 2471   enc_class riscv_enc_java_to_runtime(method meth) %{
 2472     Assembler::IncompressibleRegion ir(masm);  // Fixed length: see ret_addr_offset
 2473 
    // Some calls to generated routines (arraycopy code) are scheduled by C2
    // as runtime calls. If so, we can call them using a far call (they will be
    // in the code cache, thus in a reachable segment); otherwise we have to use
 2477     // a movptr+jalr pair which loads the absolute address into a register.
 2478     address entry = (address)$meth$$method;
 2479     if (CodeCache::contains(entry)) {
 2480       __ far_call(Address(entry, relocInfo::runtime_call_type));
 2481       __ post_call_nop();
 2482     } else {
 2483       Label retaddr;
 2484       // Make the anchor frame walkable
 2485       __ la(t0, retaddr);
 2486       __ sd(t0, Address(xthread, JavaThread::last_Java_pc_offset()));
 2487       int32_t offset = 0;
 2488       // No relocation needed
 2489       __ movptr(t1, entry, offset, t0); // lui + lui + slli + add
 2490       __ jalr(t1, offset);
 2491       __ bind(retaddr);
 2492       __ post_call_nop();
 2493     }
 2494   %}
 2495 
 2496   enc_class riscv_enc_tail_call(iRegP jump_target) %{
 2497     Register target_reg = as_Register($jump_target$$reg);
 2498     __ jr(target_reg);
 2499   %}
 2500 
 2501   enc_class riscv_enc_tail_jmp(iRegP jump_target) %{
 2502     Register target_reg = as_Register($jump_target$$reg);
 2503     // exception oop should be in x10
 2504     // ret addr has been popped into ra
 2505     // callee expects it in x13
 2506     __ mv(x13, ra);
 2507     __ jr(target_reg);
 2508   %}
 2509 
 2510   enc_class riscv_enc_rethrow() %{
 2511     __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
 2512   %}
 2513 
 2514   enc_class riscv_enc_ret() %{
 2515     __ ret();
 2516   %}
 2517 
 2518 %}
 2519 
 2520 //----------FRAME--------------------------------------------------------------
 2521 // Definition of frame structure and management information.
 2522 //
 2523 //  S T A C K   L A Y O U T    Allocators stack-slot number
 2524 //                             |   (to get allocators register number
 2525 //  G  Owned by    |        |  v    add OptoReg::stack0())
 2526 //  r   CALLER     |        |
 2527 //  o     |        +--------+      pad to even-align allocators stack-slot
 2528 //  w     V        |  pad0  |        numbers; owned by CALLER
 2529 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 2530 //  h     ^        |   in   |  5
 2531 //        |        |  args  |  4   Holes in incoming args owned by SELF
 2532 //  |     |        |        |  3
 2533 //  |     |        +--------+
 2534 //  V     |        | old out|      Empty on Intel, window on Sparc
 2535 //        |    old |preserve|      Must be even aligned.
 2536 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 2537 //        |        |   in   |  3   area for Intel ret address
 2538 //     Owned by    |preserve|      Empty on Sparc.
 2539 //       SELF      +--------+
 2540 //        |        |  pad2  |  2   pad to align old SP
 2541 //        |        +--------+  1
 2542 //        |        | locks  |  0
 2543 //        |        +--------+----> OptoReg::stack0(), even aligned
 2544 //        |        |  pad1  | 11   pad to align new SP
 2545 //        |        +--------+
 2546 //        |        |        | 10
 2547 //        |        | spills |  9   spills
 2548 //        V        |        |  8   (pad0 slot for callee)
 2549 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 2550 //        ^        |  out   |  7
 2551 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 2552 //     Owned by    +--------+
 2553 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 2554 //        |    new |preserve|      Must be even-aligned.
 2555 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 2556 //        |        |        |
 2557 //
 2558 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 2559 //         known from SELF's arguments and the Java calling convention.
 2560 //         Region 6-7 is determined per call site.
 2561 // Note 2: If the calling convention leaves holes in the incoming argument
 2562 //         area, those holes are owned by SELF.  Holes in the outgoing area
 2563 //         are owned by the CALLEE.  Holes should not be necessary in the
 2564 //         incoming area, as the Java calling convention is completely under
 2565 //         the control of the AD file.  Doubles can be sorted and packed to
 2566 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 2567 //         varargs C calling conventions.
 2568 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 2569 //         even aligned with pad0 as needed.
 2570 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 2571 //           (the latter is true on Intel but is it false on RISCV?)
 2572 //         region 6-11 is even aligned; it may be padded out more so that
 2573 //         the region from SP to FP meets the minimum stack alignment.
 2574 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 2575 //         alignment.  Region 11, pad1, may be dynamically extended so that
 2576 //         SP meets the minimum alignment.
 2577 
 2578 frame %{
 2579   // These three registers define part of the calling convention
 2580   // between compiled code and the interpreter.
 2581 
 2582   // Inline Cache Register or methodOop for I2C.
 2583   inline_cache_reg(R31);
 2584 
 2585   // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
 2586   cisc_spilling_operand_name(indOffset);
 2587 
 2588   // Number of stack slots consumed by locking an object
 2589   // generate Compile::sync_stack_slots
 2590   // VMRegImpl::slots_per_word = wordSize / stack_slot_size = 8 / 4 = 2
 2591   sync_stack_slots(1 * VMRegImpl::slots_per_word);
 2592 
 2593   // Compiled code's Frame Pointer
 2594   frame_pointer(R2);
 2595 
 2596   // Interpreter stores its frame pointer in a register which is
 2597   // stored to the stack by I2CAdaptors.
 2598   // I2CAdaptors convert from interpreted java to compiled java.
 2599   interpreter_frame_pointer(R8);
 2600 
 2601   // Stack alignment requirement
 2602   stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
 2603 
 2604   // Number of outgoing stack slots killed above the out_preserve_stack_slots
 2605   // for calls to C.  Supports the var-args backing area for register parms.
 2606   varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes / BytesPerInt);
 2607 
 2608   // The after-PROLOG location of the return address.  Location of
 2609   // return address specifies a type (REG or STACK) and a number
 2610   // representing the register number (i.e. - use a register name) or
 2611   // stack slot.
  // Ret Addr is on the stack in slot 0 if there are no locks, verification or alignment words.
  // Otherwise, it is above the locks, verification slot and alignment word.
  // TODO: this may well be correct, but we need to check why the - 2 is there;
  // the ppc port uses 0, but we definitely need to allow for fixed_slots,
  // which folds in the space used for monitors.
 2617   return_addr(STACK - 2 +
 2618               align_up((Compile::current()->in_preserve_stack_slots() +
 2619                         Compile::current()->fixed_slots()),
 2620                        stack_alignment_in_slots()));
 2621 
 2622   // Location of compiled Java return values.  Same as C for now.
 2623   return_value
 2624   %{
 2625     assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
 2626            "only return normal values");
 2627 
 2628     static const int lo[Op_RegL + 1] = { // enum name
 2629       0,                                 // Op_Node
 2630       0,                                 // Op_Set
 2631       R10_num,                           // Op_RegN
 2632       R10_num,                           // Op_RegI
 2633       R10_num,                           // Op_RegP
 2634       F10_num,                           // Op_RegF
 2635       F10_num,                           // Op_RegD
 2636       R10_num                            // Op_RegL
 2637     };
 2638 
 2639     static const int hi[Op_RegL + 1] = { // enum name
 2640       0,                                 // Op_Node
 2641       0,                                 // Op_Set
 2642       OptoReg::Bad,                      // Op_RegN
 2643       OptoReg::Bad,                      // Op_RegI
 2644       R10_H_num,                         // Op_RegP
 2645       OptoReg::Bad,                      // Op_RegF
 2646       F10_H_num,                         // Op_RegD
 2647       R10_H_num                          // Op_RegL
 2648     };
 2649 
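    // For example, an int result is returned in R10 (x10/a0) with no upper
    // half, while a double comes back in the F10/F10_H pair (fa0).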
 2650     return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
 2651   %}
 2652 %}
 2653 
 2654 //----------ATTRIBUTES---------------------------------------------------------
 2655 //----------Operand Attributes-------------------------------------------------
 2656 op_attrib op_cost(1);        // Required cost attribute
 2657 
 2658 //----------Instruction Attributes---------------------------------------------
 2659 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
 2660 ins_attrib ins_size(32);        // Required size attribute (in bits)
 2661 ins_attrib ins_short_branch(0); // Required flag: is this instruction
 2662                                 // a non-matching short branch variant
 2663                                 // of some long branch?
 2664 ins_attrib ins_alignment(4);    // Required alignment attribute (must
 2665                                 // be a power of 2) specifies the
 2666                                 // alignment that some part of the
 2667                                 // instruction (not necessarily the
 2668                                 // start) requires.  If > 1, a
 2669                                 // compute_padding() function must be
 2670                                 // provided for the instruction
 2671 
 2672 //----------OPERANDS-----------------------------------------------------------
 2673 // Operand definitions must precede instruction definitions for correct parsing
 2674 // in the ADLC because operands constitute user defined types which are used in
 2675 // instruction definitions.
 2676 
 2677 //----------Simple Operands----------------------------------------------------
 2678 
 2679 // Integer operands 32 bit
 2680 // 32 bit immediate
 2681 operand immI()
 2682 %{
 2683   match(ConI);
 2684 
 2685   op_cost(0);
 2686   format %{ %}
 2687   interface(CONST_INTER);
 2688 %}
 2689 
 2690 // 32 bit zero
 2691 operand immI0()
 2692 %{
 2693   predicate(n->get_int() == 0);
 2694   match(ConI);
 2695 
 2696   op_cost(0);
 2697   format %{ %}
 2698   interface(CONST_INTER);
 2699 %}
 2700 
 2701 // 32 bit unit increment
 2702 operand immI_1()
 2703 %{
 2704   predicate(n->get_int() == 1);
 2705   match(ConI);
 2706 
 2707   op_cost(0);
 2708   format %{ %}
 2709   interface(CONST_INTER);
 2710 %}
 2711 
 2712 // 32 bit unit decrement
 2713 operand immI_M1()
 2714 %{
 2715   predicate(n->get_int() == -1);
 2716   match(ConI);
 2717 
 2718   op_cost(0);
 2719   format %{ %}
 2720   interface(CONST_INTER);
 2721 %}
 2722 
// Unsigned Integer Immediate:  6-bit int, in the range [32, 64)
 2724 operand uimmI6_ge32() %{
 2725   predicate(((unsigned int)(n->get_int()) < 64) && (n->get_int() >= 32));
 2726   match(ConI);
 2727   op_cost(0);
 2728   format %{ %}
 2729   interface(CONST_INTER);
 2730 %}
 2731 
 2732 operand immI_le_4()
 2733 %{
 2734   predicate(n->get_int() <= 4);
 2735   match(ConI);
 2736 
 2737   op_cost(0);
 2738   format %{ %}
 2739   interface(CONST_INTER);
 2740 %}
 2741 
 2742 operand immI_16()
 2743 %{
 2744   predicate(n->get_int() == 16);
 2745   match(ConI);
 2746   op_cost(0);
 2747   format %{ %}
 2748   interface(CONST_INTER);
 2749 %}
 2750 
 2751 operand immI_24()
 2752 %{
 2753   predicate(n->get_int() == 24);
 2754   match(ConI);
 2755   op_cost(0);
 2756   format %{ %}
 2757   interface(CONST_INTER);
 2758 %}
 2759 
 2760 operand immI_31()
 2761 %{
 2762   predicate(n->get_int() == 31);
 2763   match(ConI);
 2764 
 2765   op_cost(0);
 2766   format %{ %}
 2767   interface(CONST_INTER);
 2768 %}
 2769 
 2770 operand immI_63()
 2771 %{
 2772   predicate(n->get_int() == 63);
 2773   match(ConI);
 2774 
 2775   op_cost(0);
 2776   format %{ %}
 2777   interface(CONST_INTER);
 2778 %}
 2779 
 2780 // 32 bit integer valid for add immediate
 2781 operand immIAdd()
 2782 %{
 2783   predicate(Assembler::is_simm12((int64_t)n->get_int()));
 2784   match(ConI);
 2785   op_cost(0);
 2786   format %{ %}
 2787   interface(CONST_INTER);
 2788 %}
 2789 
 2790 // 32 bit integer valid for sub immediate
 2791 operand immISub()
 2792 %{
 2793   predicate(Assembler::is_simm12(-(int64_t)n->get_int()));
 2794   match(ConI);
 2795   op_cost(0);
 2796   format %{ %}
 2797   interface(CONST_INTER);
 2798 %}
 2799 
 2800 // 5 bit signed value.
 2801 operand immI5()
 2802 %{
 2803   predicate(n->get_int() <= 15 && n->get_int() >= -16);
 2804   match(ConI);
 2805 
 2806   op_cost(0);
 2807   format %{ %}
 2808   interface(CONST_INTER);
 2809 %}
 2810 
 2811 // 5 bit signed value (simm5)
 2812 operand immL5()
 2813 %{
 2814   predicate(n->get_long() <= 15 && n->get_long() >= -16);
 2815   match(ConL);
 2816 
 2817   op_cost(0);
 2818   format %{ %}
 2819   interface(CONST_INTER);
 2820 %}
 2821 
 2822 // Integer operands 64 bit
 2823 // 64 bit immediate
 2824 operand immL()
 2825 %{
 2826   match(ConL);
 2827 
 2828   op_cost(0);
 2829   format %{ %}
 2830   interface(CONST_INTER);
 2831 %}
 2832 
 2833 // 64 bit zero
 2834 operand immL0()
 2835 %{
 2836   predicate(n->get_long() == 0);
 2837   match(ConL);
 2838 
 2839   op_cost(0);
 2840   format %{ %}
 2841   interface(CONST_INTER);
 2842 %}
 2843 
 2844 // Pointer operands
 2845 // Pointer Immediate
 2846 operand immP()
 2847 %{
 2848   match(ConP);
 2849 
 2850   op_cost(0);
 2851   format %{ %}
 2852   interface(CONST_INTER);
 2853 %}
 2854 
 2855 // Null Pointer Immediate
 2856 operand immP0()
 2857 %{
 2858   predicate(n->get_ptr() == 0);
 2859   match(ConP);
 2860 
 2861   op_cost(0);
 2862   format %{ %}
 2863   interface(CONST_INTER);
 2864 %}
 2865 
 2866 // Pointer Immediate One
 2867 // this is used in object initialization (initial object header)
 2868 operand immP_1()
 2869 %{
 2870   predicate(n->get_ptr() == 1);
 2871   match(ConP);
 2872 
 2873   op_cost(0);
 2874   format %{ %}
 2875   interface(CONST_INTER);
 2876 %}
 2877 
 2878 // Card Table Byte Map Base
 2879 operand immByteMapBase()
 2880 %{
 2881   // Get base of card map
 2882   predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
 2883             SHENANDOAHGC_ONLY(!BarrierSet::barrier_set()->is_a(BarrierSet::ShenandoahBarrierSet) &&)
 2884             (CardTable::CardValue*)n->get_ptr() ==
 2885              ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
 2886   match(ConP);
 2887 
 2888   op_cost(0);
 2889   format %{ %}
 2890   interface(CONST_INTER);
 2891 %}
 2892 
 2893 // Int Immediate: low 16-bit mask
 2894 operand immI_16bits()
 2895 %{
 2896   predicate(n->get_int() == 0xFFFF);
 2897   match(ConI);
 2898   op_cost(0);
 2899   format %{ %}
 2900   interface(CONST_INTER);
 2901 %}
 2902 
 2903 operand immIpowerOf2() %{
 2904   predicate(is_power_of_2((juint)(n->get_int())));
 2905   match(ConI);
 2906   op_cost(0);
 2907   format %{ %}
 2908   interface(CONST_INTER);
 2909 %}
 2910 
 2911 // Long Immediate: low 32-bit mask
 2912 operand immL_32bits()
 2913 %{
 2914   predicate(n->get_long() == 0xFFFFFFFFL);
 2915   match(ConL);
 2916   op_cost(0);
 2917   format %{ %}
 2918   interface(CONST_INTER);
 2919 %}
 2920 
 2921 // 64 bit unit decrement
 2922 operand immL_M1()
 2923 %{
 2924   predicate(n->get_long() == -1);
 2925   match(ConL);
 2926 
 2927   op_cost(0);
 2928   format %{ %}
 2929   interface(CONST_INTER);
 2930 %}
 2931 
 2932 
 2933 // 64 bit integer valid for add immediate
 2934 operand immLAdd()
 2935 %{
 2936   predicate(Assembler::is_simm12(n->get_long()));
 2937   match(ConL);
 2938   op_cost(0);
 2939   format %{ %}
 2940   interface(CONST_INTER);
 2941 %}
 2942 
 2943 // 64 bit integer valid for sub immediate
 2944 operand immLSub()
 2945 %{
 2946   predicate(Assembler::is_simm12(-(n->get_long())));
 2947   match(ConL);
 2948   op_cost(0);
 2949   format %{ %}
 2950   interface(CONST_INTER);
 2951 %}
 2952 
 2953 // Narrow pointer operands
 2954 // Narrow Pointer Immediate
 2955 operand immN()
 2956 %{
 2957   match(ConN);
 2958 
 2959   op_cost(0);
 2960   format %{ %}
 2961   interface(CONST_INTER);
 2962 %}
 2963 
 2964 // Narrow Null Pointer Immediate
 2965 operand immN0()
 2966 %{
 2967   predicate(n->get_narrowcon() == 0);
 2968   match(ConN);
 2969 
 2970   op_cost(0);
 2971   format %{ %}
 2972   interface(CONST_INTER);
 2973 %}
 2974 
 2975 operand immNKlass()
 2976 %{
 2977   match(ConNKlass);
 2978 
 2979   op_cost(0);
 2980   format %{ %}
 2981   interface(CONST_INTER);
 2982 %}
 2983 
 2984 // Float and Double operands
 2985 // Double Immediate
 2986 operand immD()
 2987 %{
 2988   match(ConD);
 2989   op_cost(0);
 2990   format %{ %}
 2991   interface(CONST_INTER);
 2992 %}
 2993 
 2994 // Double Immediate: +0.0d
 2995 operand immD0()
 2996 %{
 2997   predicate(jlong_cast(n->getd()) == 0);
 2998   match(ConD);
 2999 
 3000   op_cost(0);
 3001   format %{ %}
 3002   interface(CONST_INTER);
 3003 %}
 3004 
 3005 // Float Immediate
 3006 operand immF()
 3007 %{
 3008   match(ConF);
 3009   op_cost(0);
 3010   format %{ %}
 3011   interface(CONST_INTER);
 3012 %}
 3013 
 3014 // Float Immediate: +0.0f.
 3015 operand immF0()
 3016 %{
 3017   predicate(jint_cast(n->getf()) == 0);
 3018   match(ConF);
 3019 
 3020   op_cost(0);
 3021   format %{ %}
 3022   interface(CONST_INTER);
 3023 %}
 3024 
 3025 // Half Float Immediate
 3026 operand immH()
 3027 %{
 3028   match(ConH);
 3029 
 3030   op_cost(0);
 3031   format %{ %}
 3032   interface(CONST_INTER);
 3033 %}
 3034 
 3035 // Half Float Immediate: +0.0f.
 3036 operand immH0()
 3037 %{
 3038   predicate(jint_cast(n->geth()) == 0);
 3039   match(ConH);
 3040 
 3041   op_cost(0);
 3042   format %{ %}
 3043   interface(CONST_INTER);
 3044 %}
 3045 
 3046 operand immIOffset()
 3047 %{
 3048   predicate(Assembler::is_simm12(n->get_int()));
 3049   match(ConI);
 3050   op_cost(0);
 3051   format %{ %}
 3052   interface(CONST_INTER);
 3053 %}
 3054 
 3055 operand immLOffset()
 3056 %{
 3057   predicate(Assembler::is_simm12(n->get_long()));
 3058   match(ConL);
 3059   op_cost(0);
 3060   format %{ %}
 3061   interface(CONST_INTER);
 3062 %}
 3063 
 3064 // Scale values
 3065 operand immIScale()
 3066 %{
 3067   predicate(1 <= n->get_int() && (n->get_int() <= 3));
 3068   match(ConI);
 3069 
 3070   op_cost(0);
 3071   format %{ %}
 3072   interface(CONST_INTER);
 3073 %}
 3074 
 3075 // Integer 32 bit Register Operands
 3076 operand iRegI()
 3077 %{
 3078   constraint(ALLOC_IN_RC(any_reg32));
 3079   match(RegI);
 3080   match(iRegINoSp);
 3081   op_cost(0);
 3082   format %{ %}
 3083   interface(REG_INTER);
 3084 %}
 3085 
 3086 // Integer 32 bit Register not Special
 3087 operand iRegINoSp()
 3088 %{
 3089   constraint(ALLOC_IN_RC(no_special_reg32));
 3090   match(RegI);
 3091   op_cost(0);
 3092   format %{ %}
 3093   interface(REG_INTER);
 3094 %}
 3095 
 3096 // Register R10 only
 3097 operand iRegI_R10()
 3098 %{
 3099   constraint(ALLOC_IN_RC(int_r10_reg));
 3100   match(RegI);
 3101   match(iRegINoSp);
 3102   op_cost(0);
 3103   format %{ %}
 3104   interface(REG_INTER);
 3105 %}
 3106 
 3107 // Register R12 only
 3108 operand iRegI_R12()
 3109 %{
 3110   constraint(ALLOC_IN_RC(int_r12_reg));
 3111   match(RegI);
 3112   match(iRegINoSp);
 3113   op_cost(0);
 3114   format %{ %}
 3115   interface(REG_INTER);
 3116 %}
 3117 
 3118 // Register R13 only
 3119 operand iRegI_R13()
 3120 %{
 3121   constraint(ALLOC_IN_RC(int_r13_reg));
 3122   match(RegI);
 3123   match(iRegINoSp);
 3124   op_cost(0);
 3125   format %{ %}
 3126   interface(REG_INTER);
 3127 %}
 3128 
 3129 // Register R14 only
 3130 operand iRegI_R14()
 3131 %{
 3132   constraint(ALLOC_IN_RC(int_r14_reg));
 3133   match(RegI);
 3134   match(iRegINoSp);
 3135   op_cost(0);
 3136   format %{ %}
 3137   interface(REG_INTER);
 3138 %}
 3139 
 3140 // Integer 64 bit Register Operands
 3141 operand iRegL()
 3142 %{
 3143   constraint(ALLOC_IN_RC(any_reg));
 3144   match(RegL);
 3145   match(iRegLNoSp);
 3146   op_cost(0);
 3147   format %{ %}
 3148   interface(REG_INTER);
 3149 %}
 3150 
 3151 // Integer 64 bit Register not Special
 3152 operand iRegLNoSp()
 3153 %{
 3154   constraint(ALLOC_IN_RC(no_special_reg));
 3155   match(RegL);
 3156   match(iRegL_R10);
 3157   format %{ %}
 3158   interface(REG_INTER);
 3159 %}
 3160 
 3161 // Long 64 bit Register R29 only
 3162 operand iRegL_R29()
 3163 %{
 3164   constraint(ALLOC_IN_RC(r29_reg));
 3165   match(RegL);
 3166   match(iRegLNoSp);
 3167   op_cost(0);
 3168   format %{ %}
 3169   interface(REG_INTER);
 3170 %}
 3171 
 3172 // Long 64 bit Register R30 only
 3173 operand iRegL_R30()
 3174 %{
 3175   constraint(ALLOC_IN_RC(r30_reg));
 3176   match(RegL);
 3177   match(iRegLNoSp);
 3178   op_cost(0);
 3179   format %{ %}
 3180   interface(REG_INTER);
 3181 %}
 3182 
 3183 // Pointer Register Operands
 3184 // Pointer Register
 3185 operand iRegP()
 3186 %{
 3187   constraint(ALLOC_IN_RC(ptr_reg));
 3188   match(RegP);
 3189   match(iRegPNoSp);
 3190   match(iRegP_R10);
 3191   match(iRegP_R15);
 3192   match(javaThread_RegP);
 3193   op_cost(0);
 3194   format %{ %}
 3195   interface(REG_INTER);
 3196 %}
 3197 
 3198 // Pointer 64 bit Register not Special
 3199 operand iRegPNoSp()
 3200 %{
 3201   constraint(ALLOC_IN_RC(no_special_ptr_reg));
 3202   match(RegP);
 3203   op_cost(0);
 3204   format %{ %}
 3205   interface(REG_INTER);
 3206 %}
 3207 
// This operand is not allowed to use fp, even when
// fp is not being used to hold the frame pointer.
 3210 operand iRegPNoSpNoFp()
 3211 %{
 3212   constraint(ALLOC_IN_RC(no_special_no_fp_ptr_reg));
 3213   match(RegP);
 3214   match(iRegPNoSp);
 3215   op_cost(0);
 3216   format %{ %}
 3217   interface(REG_INTER);
 3218 %}
 3219 
 3220 operand iRegP_R10()
 3221 %{
 3222   constraint(ALLOC_IN_RC(r10_reg));
 3223   match(RegP);
 3224   // match(iRegP);
 3225   match(iRegPNoSp);
 3226   op_cost(0);
 3227   format %{ %}
 3228   interface(REG_INTER);
 3229 %}
 3230 
 3231 // Pointer 64 bit Register R11 only
 3232 operand iRegP_R11()
 3233 %{
 3234   constraint(ALLOC_IN_RC(r11_reg));
 3235   match(RegP);
 3236   match(iRegPNoSp);
 3237   op_cost(0);
 3238   format %{ %}
 3239   interface(REG_INTER);
 3240 %}
 3241 
 3242 operand iRegP_R12()
 3243 %{
 3244   constraint(ALLOC_IN_RC(r12_reg));
 3245   match(RegP);
 3246   // match(iRegP);
 3247   match(iRegPNoSp);
 3248   op_cost(0);
 3249   format %{ %}
 3250   interface(REG_INTER);
 3251 %}
 3252 
 3253 // Pointer 64 bit Register R13 only
 3254 operand iRegP_R13()
 3255 %{
 3256   constraint(ALLOC_IN_RC(r13_reg));
 3257   match(RegP);
 3258   match(iRegPNoSp);
 3259   op_cost(0);
 3260   format %{ %}
 3261   interface(REG_INTER);
 3262 %}
 3263 
 3264 operand iRegP_R14()
 3265 %{
 3266   constraint(ALLOC_IN_RC(r14_reg));
 3267   match(RegP);
 3268   // match(iRegP);
 3269   match(iRegPNoSp);
 3270   op_cost(0);
 3271   format %{ %}
 3272   interface(REG_INTER);
 3273 %}
 3274 
 3275 operand iRegP_R15()
 3276 %{
 3277   constraint(ALLOC_IN_RC(r15_reg));
 3278   match(RegP);
 3279   // match(iRegP);
 3280   match(iRegPNoSp);
 3281   op_cost(0);
 3282   format %{ %}
 3283   interface(REG_INTER);
 3284 %}
 3285 
 3286 operand iRegP_R16()
 3287 %{
 3288   constraint(ALLOC_IN_RC(r16_reg));
 3289   match(RegP);
 3290   match(iRegPNoSp);
 3291   op_cost(0);
 3292   format %{ %}
 3293   interface(REG_INTER);
 3294 %}
 3295 
 3296 // Pointer 64 bit Register R28 only
 3297 operand iRegP_R28()
 3298 %{
 3299   constraint(ALLOC_IN_RC(r28_reg));
 3300   match(RegP);
 3301   match(iRegPNoSp);
 3302   op_cost(0);
 3303   format %{ %}
 3304   interface(REG_INTER);
 3305 %}
 3306 
 3307 // Pointer 64 bit Register R30 only
 3308 operand iRegP_R30()
 3309 %{
 3310   constraint(ALLOC_IN_RC(r30_reg));
 3311   match(RegP);
 3312   match(iRegPNoSp);
 3313   op_cost(0);
 3314   format %{ %}
 3315   interface(REG_INTER);
 3316 %}
 3317 
 3318 // Pointer 64 bit Register R31 only
 3319 operand iRegP_R31()
 3320 %{
 3321   constraint(ALLOC_IN_RC(r31_reg));
 3322   match(RegP);
 3323   match(iRegPNoSp);
 3324   op_cost(0);
 3325   format %{ %}
 3326   interface(REG_INTER);
 3327 %}
 3328 
// Narrow Pointer Register Operands
 3330 // Narrow Pointer Register
 3331 operand iRegN()
 3332 %{
 3333   constraint(ALLOC_IN_RC(any_reg32));
 3334   match(RegN);
 3335   match(iRegNNoSp);
 3336   op_cost(0);
 3337   format %{ %}
 3338   interface(REG_INTER);
 3339 %}
 3340 
// Narrow Pointer Register not Special
 3342 operand iRegNNoSp()
 3343 %{
 3344   constraint(ALLOC_IN_RC(no_special_reg32));
 3345   match(RegN);
 3346   op_cost(0);
 3347   format %{ %}
 3348   interface(REG_INTER);
 3349 %}
 3350 
 3351 // Long 64 bit Register R10 only
 3352 operand iRegL_R10()
 3353 %{
 3354   constraint(ALLOC_IN_RC(r10_reg));
 3355   match(RegL);
 3356   match(iRegLNoSp);
 3357   op_cost(0);
 3358   format %{ %}
 3359   interface(REG_INTER);
 3360 %}
 3361 
// Float Register Operands
 3364 operand fRegF()
 3365 %{
 3366   constraint(ALLOC_IN_RC(float_reg));
 3367   match(RegF);
 3368 
 3369   op_cost(0);
 3370   format %{ %}
 3371   interface(REG_INTER);
 3372 %}
 3373 
// Double Register Operands
 3376 operand fRegD()
 3377 %{
 3378   constraint(ALLOC_IN_RC(double_reg));
 3379   match(RegD);
 3380 
 3381   op_cost(0);
 3382   format %{ %}
 3383   interface(REG_INTER);
 3384 %}
 3385 
 3386 // Generic vector class. This will be used for
 3387 // all vector operands.
 3388 operand vReg()
 3389 %{
 3390   constraint(ALLOC_IN_RC(vectora_reg));
 3391   match(VecA);
 3392   op_cost(0);
 3393   format %{ %}
 3394   interface(REG_INTER);
 3395 %}
 3396 
 3397 operand vReg_V1()
 3398 %{
 3399   constraint(ALLOC_IN_RC(v1_reg));
 3400   match(VecA);
 3401   match(vReg);
 3402   op_cost(0);
 3403   format %{ %}
 3404   interface(REG_INTER);
 3405 %}
 3406 
 3407 operand vReg_V2()
 3408 %{
 3409   constraint(ALLOC_IN_RC(v2_reg));
 3410   match(VecA);
 3411   match(vReg);
 3412   op_cost(0);
 3413   format %{ %}
 3414   interface(REG_INTER);
 3415 %}
 3416 
 3417 operand vReg_V3()
 3418 %{
 3419   constraint(ALLOC_IN_RC(v3_reg));
 3420   match(VecA);
 3421   match(vReg);
 3422   op_cost(0);
 3423   format %{ %}
 3424   interface(REG_INTER);
 3425 %}
 3426 
 3427 operand vReg_V4()
 3428 %{
 3429   constraint(ALLOC_IN_RC(v4_reg));
 3430   match(VecA);
 3431   match(vReg);
 3432   op_cost(0);
 3433   format %{ %}
 3434   interface(REG_INTER);
 3435 %}
 3436 
 3437 operand vReg_V5()
 3438 %{
 3439   constraint(ALLOC_IN_RC(v5_reg));
 3440   match(VecA);
 3441   match(vReg);
 3442   op_cost(0);
 3443   format %{ %}
 3444   interface(REG_INTER);
 3445 %}
 3446 
 3447 operand vReg_V6()
 3448 %{
 3449   constraint(ALLOC_IN_RC(v6_reg));
 3450   match(VecA);
 3451   match(vReg);
 3452   op_cost(0);
 3453   format %{ %}
 3454   interface(REG_INTER);
 3455 %}
 3456 
 3457 operand vReg_V7()
 3458 %{
 3459   constraint(ALLOC_IN_RC(v7_reg));
 3460   match(VecA);
 3461   match(vReg);
 3462   op_cost(0);
 3463   format %{ %}
 3464   interface(REG_INTER);
 3465 %}
 3466 
 3467 operand vReg_V8()
 3468 %{
 3469   constraint(ALLOC_IN_RC(v8_reg));
 3470   match(VecA);
 3471   match(vReg);
 3472   op_cost(0);
 3473   format %{ %}
 3474   interface(REG_INTER);
 3475 %}
 3476 
 3477 operand vReg_V9()
 3478 %{
 3479   constraint(ALLOC_IN_RC(v9_reg));
 3480   match(VecA);
 3481   match(vReg);
 3482   op_cost(0);
 3483   format %{ %}
 3484   interface(REG_INTER);
 3485 %}
 3486 
 3487 operand vReg_V10()
 3488 %{
 3489   constraint(ALLOC_IN_RC(v10_reg));
 3490   match(VecA);
 3491   match(vReg);
 3492   op_cost(0);
 3493   format %{ %}
 3494   interface(REG_INTER);
 3495 %}
 3496 
 3497 operand vReg_V11()
 3498 %{
 3499   constraint(ALLOC_IN_RC(v11_reg));
 3500   match(VecA);
 3501   match(vReg);
 3502   op_cost(0);
 3503   format %{ %}
 3504   interface(REG_INTER);
 3505 %}
 3506 
 3507 operand vRegMask()
 3508 %{
 3509   constraint(ALLOC_IN_RC(vmask_reg));
 3510   match(RegVectMask);
 3511   match(vRegMask_V0);
 3512   op_cost(0);
 3513   format %{ %}
 3514   interface(REG_INTER);
 3515 %}
 3516 
 3517 // The mask value used to control execution of a masked
 3518 // vector instruction is always supplied by vector register v0.
 3519 operand vRegMask_V0()
 3520 %{
 3521   constraint(ALLOC_IN_RC(vmask_reg_v0));
 3522   match(RegVectMask);
 3523   match(vRegMask);
 3524   op_cost(0);
 3525   format %{ %}
 3526   interface(REG_INTER);
 3527 %}
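
// E.g. (illustrative only): a masked vector instruction such as
//   vadd.vv vd, vs2, vs1, v0.t
// always takes its mask from v0, so rules emitting masked code bind their
// mask input to vRegMask_V0 rather than to a general vRegMask.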
 3528 
 3529 // Java Thread Register
 3530 operand javaThread_RegP(iRegP reg)
 3531 %{
 3532   constraint(ALLOC_IN_RC(java_thread_reg)); // java_thread_reg
 3533   match(reg);
 3534   op_cost(0);
 3535   format %{ %}
 3536   interface(REG_INTER);
 3537 %}
 3538 
 3539 //----------Memory Operands----------------------------------------------------
// RISC-V has only base-plus-offset and literal addressing modes, so there is no
// need for index and scale. Here index is set to 0xffffffff and scale to 0x0.
 3542 operand indirect(iRegP reg)
 3543 %{
 3544   constraint(ALLOC_IN_RC(ptr_reg));
 3545   match(reg);
 3546   op_cost(0);
 3547   format %{ "[$reg]" %}
 3548   interface(MEMORY_INTER) %{
 3549     base($reg);
 3550     index(0xffffffff);
 3551     scale(0x0);
 3552     disp(0x0);
 3553   %}
 3554 %}
 3555 
 3556 operand indOffI(iRegP reg, immIOffset off)
 3557 %{
 3558   constraint(ALLOC_IN_RC(ptr_reg));
 3559   match(AddP reg off);
 3560   op_cost(0);
 3561   format %{ "[$reg, $off]" %}
 3562   interface(MEMORY_INTER) %{
 3563     base($reg);
 3564     index(0xffffffff);
 3565     scale(0x0);
 3566     disp($off);
 3567   %}
 3568 %}
 3569 
 3570 operand indOffL(iRegP reg, immLOffset off)
 3571 %{
 3572   constraint(ALLOC_IN_RC(ptr_reg));
 3573   match(AddP reg off);
 3574   op_cost(0);
 3575   format %{ "[$reg, $off]" %}
 3576   interface(MEMORY_INTER) %{
 3577     base($reg);
 3578     index(0xffffffff);
 3579     scale(0x0);
 3580     disp($off);
 3581   %}
 3582 %}
 3583 
 3584 operand indirectN(iRegN reg)
 3585 %{
 3586   predicate(CompressedOops::shift() == 0);
 3587   constraint(ALLOC_IN_RC(ptr_reg));
 3588   match(DecodeN reg);
 3589   op_cost(0);
 3590   format %{ "[$reg]\t# narrow" %}
 3591   interface(MEMORY_INTER) %{
 3592     base($reg);
 3593     index(0xffffffff);
 3594     scale(0x0);
 3595     disp(0x0);
 3596   %}
 3597 %}
 3598 
 3599 operand indOffIN(iRegN reg, immIOffset off)
 3600 %{
 3601   predicate(CompressedOops::shift() == 0);
 3602   constraint(ALLOC_IN_RC(ptr_reg));
 3603   match(AddP (DecodeN reg) off);
 3604   op_cost(0);
 3605   format %{ "[$reg, $off]\t# narrow" %}
 3606   interface(MEMORY_INTER) %{
 3607     base($reg);
 3608     index(0xffffffff);
 3609     scale(0x0);
 3610     disp($off);
 3611   %}
 3612 %}
 3613 
 3614 operand indOffLN(iRegN reg, immLOffset off)
 3615 %{
 3616   predicate(CompressedOops::shift() == 0);
 3617   constraint(ALLOC_IN_RC(ptr_reg));
 3618   match(AddP (DecodeN reg) off);
 3619   op_cost(0);
 3620   format %{ "[$reg, $off]\t# narrow" %}
 3621   interface(MEMORY_INTER) %{
 3622     base($reg);
 3623     index(0xffffffff);
 3624     scale(0x0);
 3625     disp($off);
 3626   %}
 3627 %}
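
// E.g. (illustrative only): an access such as (LoadL (AddP base con)) with con
// in the signed 12-bit range matches the indOffL operand above and can be
// emitted as a single  ld Rd, con(Rbase);  an offset outside that range is
// first added into a register and the plain indirect form is used instead.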
 3628 
 3629 //----------Special Memory Operands--------------------------------------------
 3630 // Stack Slot Operand - This operand is used for loading and storing temporary
 3631 //                      values on the stack where a match requires a value to
 3632 //                      flow through memory.
 3633 operand stackSlotI(sRegI reg)
 3634 %{
 3635   constraint(ALLOC_IN_RC(stack_slots));
 3636   // No match rule because this operand is only generated in matching
 3637   // match(RegI);
 3638   format %{ "[$reg]" %}
 3639   interface(MEMORY_INTER) %{
 3640     base(0x02);  // RSP
 3641     index(0xffffffff);  // No Index
 3642     scale(0x0);  // No Scale
 3643     disp($reg);  // Stack Offset
 3644   %}
 3645 %}
 3646 
 3647 operand stackSlotF(sRegF reg)
 3648 %{
 3649   constraint(ALLOC_IN_RC(stack_slots));
 3650   // No match rule because this operand is only generated in matching
 3651   // match(RegF);
 3652   format %{ "[$reg]" %}
 3653   interface(MEMORY_INTER) %{
 3654     base(0x02);  // RSP
 3655     index(0xffffffff);  // No Index
 3656     scale(0x0);  // No Scale
 3657     disp($reg);  // Stack Offset
 3658   %}
 3659 %}
 3660 
 3661 operand stackSlotD(sRegD reg)
 3662 %{
 3663   constraint(ALLOC_IN_RC(stack_slots));
 3664   // No match rule because this operand is only generated in matching
 3665   // match(RegD);
 3666   format %{ "[$reg]" %}
 3667   interface(MEMORY_INTER) %{
 3668     base(0x02);  // RSP
 3669     index(0xffffffff);  // No Index
 3670     scale(0x0);  // No Scale
 3671     disp($reg);  // Stack Offset
 3672   %}
 3673 %}
 3674 
 3675 operand stackSlotL(sRegL reg)
 3676 %{
 3677   constraint(ALLOC_IN_RC(stack_slots));
 3678   // No match rule because this operand is only generated in matching
 3679   // match(RegL);
 3680   format %{ "[$reg]" %}
 3681   interface(MEMORY_INTER) %{
 3682     base(0x02);  // RSP
 3683     index(0xffffffff);  // No Index
 3684     scale(0x0);  // No Scale
 3685     disp($reg);  // Stack Offset
 3686   %}
 3687 %}
 3688 
 3689 // Special operand allowing long args to int ops to be truncated for free
 3690 
 3691 operand iRegL2I(iRegL reg) %{
 3692 
 3693   op_cost(0);
 3694 
 3695   match(ConvL2I reg);
 3696 
 3697   format %{ "l2i($reg)" %}
 3698 
  interface(REG_INTER);
 3700 %}
 3701 
 3702 
 3703 // Comparison Operands
 3704 // NOTE: Label is a predefined operand which should not be redefined in
 3705 //       the AD file. It is generically handled within the ADLC.
 3706 
 3707 //----------Conditional Branch Operands----------------------------------------
 3708 // Comparison Op  - This is the operation of the comparison, and is limited to
 3709 //                  the following set of codes:
 3710 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 3711 //
 3712 // Other attributes of the comparison, such as unsignedness, are specified
 3713 // by the comparison instruction that sets a condition code flags register.
 3714 // That result is represented by a flags operand whose subtype is appropriate
 3715 // to the unsignedness (etc.) of the comparison.
 3716 //
 3717 // Later, the instruction which matches both the Comparison Op (a Bool) and
 3718 // the flags (produced by the Cmp) specifies the coding of the comparison op
 3719 // by matching a specific subtype of Bool operand below, such as cmpOpU.
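//
// E.g. (illustrative only): a signed test such as (a < b) appears in the ideal
// graph as (Bool (CmpI a b)) with an "lt" test; a branch rule matching cmpOp
// together with the CmpI inputs can then emit a single blt instruction.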
 3720 
 3721 
 3722 // used for signed integral comparisons and fp comparisons
 3723 operand cmpOp()
 3724 %{
 3725   match(Bool);
 3726 
 3727   format %{ "" %}
 3728 
  // the values in the interface derive from struct BoolTest::mask
 3730   interface(COND_INTER) %{
 3731     equal(0x0, "eq");
 3732     greater(0x1, "gt");
 3733     overflow(0x2, "overflow");
 3734     less(0x3, "lt");
 3735     not_equal(0x4, "ne");
 3736     less_equal(0x5, "le");
 3737     no_overflow(0x6, "no_overflow");
 3738     greater_equal(0x7, "ge");
 3739   %}
 3740 %}
 3741 
 3742 // used for unsigned integral comparisons
 3743 operand cmpOpU()
 3744 %{
 3745   match(Bool);
 3746 
 3747   format %{ "" %}
  // the values in the interface derive from struct BoolTest::mask
 3749   interface(COND_INTER) %{
 3750     equal(0x0, "eq");
 3751     greater(0x1, "gtu");
 3752     overflow(0x2, "overflow");
 3753     less(0x3, "ltu");
 3754     not_equal(0x4, "ne");
 3755     less_equal(0x5, "leu");
 3756     no_overflow(0x6, "no_overflow");
 3757     greater_equal(0x7, "geu");
 3758   %}
 3759 %}
 3760 
 3761 // used for certain integral comparisons which can be
 3762 // converted to bxx instructions
 3763 operand cmpOpEqNe()
 3764 %{
 3765   match(Bool);
 3766   op_cost(0);
 3767   predicate(n->as_Bool()->_test._test == BoolTest::ne ||
 3768             n->as_Bool()->_test._test == BoolTest::eq);
 3769 
 3770   format %{ "" %}
 3771   interface(COND_INTER) %{
 3772     equal(0x0, "eq");
 3773     greater(0x1, "gt");
 3774     overflow(0x2, "overflow");
 3775     less(0x3, "lt");
 3776     not_equal(0x4, "ne");
 3777     less_equal(0x5, "le");
 3778     no_overflow(0x6, "no_overflow");
 3779     greater_equal(0x7, "ge");
 3780   %}
 3781 %}
 3782 
 3783 operand cmpOpULtGe()
 3784 %{
 3785   match(Bool);
 3786   op_cost(0);
 3787   predicate(n->as_Bool()->_test._test == BoolTest::lt ||
 3788             n->as_Bool()->_test._test == BoolTest::ge);
 3789 
 3790   format %{ "" %}
 3791   interface(COND_INTER) %{
 3792     equal(0x0, "eq");
 3793     greater(0x1, "gtu");
 3794     overflow(0x2, "overflow");
 3795     less(0x3, "ltu");
 3796     not_equal(0x4, "ne");
 3797     less_equal(0x5, "leu");
 3798     no_overflow(0x6, "no_overflow");
 3799     greater_equal(0x7, "geu");
 3800   %}
 3801 %}
 3802 
 3803 operand cmpOpUEqNeLeGt()
 3804 %{
 3805   match(Bool);
 3806   op_cost(0);
 3807   predicate(n->as_Bool()->_test._test == BoolTest::ne ||
 3808             n->as_Bool()->_test._test == BoolTest::eq ||
 3809             n->as_Bool()->_test._test == BoolTest::le ||
 3810             n->as_Bool()->_test._test == BoolTest::gt);
 3811 
 3812   format %{ "" %}
 3813   interface(COND_INTER) %{
 3814     equal(0x0, "eq");
 3815     greater(0x1, "gtu");
 3816     overflow(0x2, "overflow");
 3817     less(0x3, "ltu");
 3818     not_equal(0x4, "ne");
 3819     less_equal(0x5, "leu");
 3820     no_overflow(0x6, "no_overflow");
 3821     greater_equal(0x7, "geu");
 3822   %}
 3823 %}
 3824 
 3825 
 3826 // Flags register, used as output of compare logic
 3827 operand rFlagsReg()
 3828 %{
 3829   constraint(ALLOC_IN_RC(reg_flags));
 3830   match(RegFlags);
 3831 
 3832   op_cost(0);
 3833   format %{ "RFLAGS" %}
 3834   interface(REG_INTER);
 3835 %}
 3836 
 3837 // Special Registers
 3838 
 3839 // Method Register
 3840 operand inline_cache_RegP(iRegP reg)
 3841 %{
 3842   constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
 3843   match(reg);
 3844   match(iRegPNoSp);
 3845   op_cost(0);
 3846   format %{ %}
 3847   interface(REG_INTER);
 3848 %}
 3849 
 3850 //----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
 3852 // instruction definitions by not requiring the AD writer to specify
 3853 // separate instructions for every form of operand when the
 3854 // instruction accepts multiple operand types with the same basic
 3855 // encoding and format. The classic case of this is memory operands.
 3856 
// memory is used to define the read/write location for load/store
// instruction defs. We can turn a memory op into an Address.
 3859 
 3860 opclass memory(indirect, indOffI, indOffL, indirectN, indOffIN, indOffLN);
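
// E.g. (illustrative only): a load rule written against "memory mem" covers
// both the [$reg] and [$reg, $off] forms above; the matcher picks the concrete
// operand (indirect, indOffI, ...) and the encoding reads the selected base
// and displacement via $mem$$base and $mem$$disp.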
 3861 
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. It allows the src to be either an iRegI or a (ConvL2I
// iRegL). In the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// N.B. this does not elide all L2I conversions. If the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes, so an l2i gets planted
// (actually an addiw $dst, $src, 0) and the downstream instructions
// consume the result of the L2I as an iRegI input. That's a shame since
// the addiw is actually redundant, but it's not too costly.
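//
// E.g. (illustrative only): (AddI src1 (ConvL2I src2)) can be matched by an
// addw rule whose inputs are iRegIorL2I, so the 64-bit register holding src2
// is read directly and no separate l2i (addiw) is planted for it.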
 3874 
 3875 opclass iRegIorL2I(iRegI, iRegL2I);
 3876 opclass iRegIorL(iRegI, iRegL);
 3877 opclass iRegNorP(iRegN, iRegP);
 3878 opclass iRegILNP(iRegI, iRegL, iRegN, iRegP);
 3879 opclass iRegILNPNoSp(iRegINoSp, iRegLNoSp, iRegNNoSp, iRegPNoSp);
 3880 opclass immIorL(immI, immL);
 3881 
 3882 //----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.
 3884 
 3885 // For specific pipelines, e.g. generic RISC-V, define the stages of that pipeline
 3886 //pipe_desc(ID, EX, MEM, WR);
 3887 #define ID   S0
 3888 #define EX   S1
 3889 #define MEM  S2
 3890 #define WR   S3
 3891 
 3893 pipeline %{
 3894 
 3895 attributes %{
 3896   // RISC-V instructions are of fixed length
 3897   fixed_size_instructions;           // Fixed size instructions TODO does
 3898   max_instructions_per_bundle = 2;   // Generic RISC-V 1, Sifive Series 7 2
 3899   // RISC-V instructions come in 32-bit word units
 3900   instruction_unit_size = 4;         // An instruction is 4 bytes long
 3901   instruction_fetch_unit_size = 64;  // The processor fetches one line
 3902   instruction_fetch_units = 1;       // of 64 bytes
 3903 
 3904   // List of nop instructions
 3905   nops( MachNop );
 3906 %}
 3907 
// We don't use an actual pipeline model, so we don't care about resources
// or their description. We do use pipeline classes to introduce fixed
// latencies.
 3911 
 3912 //----------RESOURCES----------------------------------------------------------
 3913 // Resources are the functional units available to the machine
 3914 
 3915 // Generic RISC-V pipeline
 3916 // 1 decoder
 3917 // 1 instruction decoded per cycle
// 1 load/store op per cycle, 1 branch, 1 FPU
 3919 // 1 mul, 1 div
 3920 
 3921 resources ( DECODE,
 3922             ALU,
 3923             MUL,
 3924             DIV,
 3925             BRANCH,
 3926             LDST,
 3927             FPU);
 3928 
 3929 //----------PIPELINE DESCRIPTION-----------------------------------------------
 3930 // Pipeline Description specifies the stages in the machine's pipeline
 3931 
 3932 // Define the pipeline as a generic 6 stage pipeline
 3933 pipe_desc(S0, S1, S2, S3, S4, S5);
 3934 
 3935 //----------PIPELINE CLASSES---------------------------------------------------
 3936 // Pipeline Classes describe the stages in which input and output are
 3937 // referenced by the hardware pipeline.
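//
// In the classes below, a line such as "src1 : S1(read)" records the stage in
// which an operand is read, "dst : S5(write)" the stage in which the result is
// written, and "FPU : S5" the stage in which a functional unit (resource) is
// occupied; together these stage annotations yield the fixed latencies
// mentioned above.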
 3938 
 3939 pipe_class fp_dop_reg_reg_s(fRegF dst, fRegF src1, fRegF src2)
 3940 %{
 3941   single_instruction;
 3942   src1   : S1(read);
 3943   src2   : S2(read);
 3944   dst    : S5(write);
 3945   DECODE : ID;
 3946   FPU    : S5;
 3947 %}
 3948 
 3949 pipe_class fp_dop_reg_reg_d(fRegD dst, fRegD src1, fRegD src2)
 3950 %{
 3951   src1   : S1(read);
 3952   src2   : S2(read);
 3953   dst    : S5(write);
 3954   DECODE : ID;
 3955   FPU    : S5;
 3956 %}
 3957 
 3958 pipe_class fp_uop_s(fRegF dst, fRegF src)
 3959 %{
 3960   single_instruction;
 3961   src    : S1(read);
 3962   dst    : S5(write);
 3963   DECODE : ID;
 3964   FPU    : S5;
 3965 %}
 3966 
 3967 pipe_class fp_uop_d(fRegD dst, fRegD src)
 3968 %{
 3969   single_instruction;
 3970   src    : S1(read);
 3971   dst    : S5(write);
 3972   DECODE : ID;
 3973   FPU    : S5;
 3974 %}
 3975 
 3976 pipe_class fp_d2f(fRegF dst, fRegD src)
 3977 %{
 3978   single_instruction;
 3979   src    : S1(read);
 3980   dst    : S5(write);
 3981   DECODE : ID;
 3982   FPU    : S5;
 3983 %}
 3984 
 3985 pipe_class fp_f2d(fRegD dst, fRegF src)
 3986 %{
 3987   single_instruction;
 3988   src    : S1(read);
 3989   dst    : S5(write);
 3990   DECODE : ID;
 3991   FPU    : S5;
 3992 %}
 3993 
 3994 pipe_class fp_f2i(iRegINoSp dst, fRegF src)
 3995 %{
 3996   single_instruction;
 3997   src    : S1(read);
 3998   dst    : S5(write);
 3999   DECODE : ID;
 4000   FPU    : S5;
 4001 %}
 4002 
 4003 pipe_class fp_f2l(iRegLNoSp dst, fRegF src)
 4004 %{
 4005   single_instruction;
 4006   src    : S1(read);
 4007   dst    : S5(write);
 4008   DECODE : ID;
 4009   FPU    : S5;
 4010 %}
 4011 
 4012 pipe_class fp_i2f(fRegF dst, iRegIorL2I src)
 4013 %{
 4014   single_instruction;
 4015   src    : S1(read);
 4016   dst    : S5(write);
 4017   DECODE : ID;
 4018   FPU    : S5;
 4019 %}
 4020 
 4021 pipe_class fp_l2f(fRegF dst, iRegL src)
 4022 %{
 4023   single_instruction;
 4024   src    : S1(read);
 4025   dst    : S5(write);
 4026   DECODE : ID;
 4027   FPU    : S5;
 4028 %}
 4029 
 4030 pipe_class fp_d2i(iRegINoSp dst, fRegD src)
 4031 %{
 4032   single_instruction;
 4033   src    : S1(read);
 4034   dst    : S5(write);
 4035   DECODE : ID;
 4036   FPU    : S5;
 4037 %}
 4038 
 4039 pipe_class fp_d2l(iRegLNoSp dst, fRegD src)
 4040 %{
 4041   single_instruction;
 4042   src    : S1(read);
 4043   dst    : S5(write);
 4044   DECODE : ID;
 4045   FPU    : S5;
 4046 %}
 4047 
 4048 pipe_class fp_i2d(fRegD dst, iRegIorL2I src)
 4049 %{
 4050   single_instruction;
 4051   src    : S1(read);
 4052   dst    : S5(write);
 4053   DECODE : ID;
 4054   FPU    : S5;
 4055 %}
 4056 
 4057 pipe_class fp_l2d(fRegD dst, iRegIorL2I src)
 4058 %{
 4059   single_instruction;
 4060   src    : S1(read);
 4061   dst    : S5(write);
 4062   DECODE : ID;
 4063   FPU    : S5;
 4064 %}
 4065 
 4066 pipe_class fp_div_s(fRegF dst, fRegF src1, fRegF src2)
 4067 %{
 4068   single_instruction;
 4069   src1   : S1(read);
 4070   src2   : S2(read);
 4071   dst    : S5(write);
 4072   DECODE : ID;
 4073   FPU    : S5;
 4074 %}
 4075 
 4076 pipe_class fp_div_d(fRegD dst, fRegD src1, fRegD src2)
 4077 %{
 4078   single_instruction;
 4079   src1   : S1(read);
 4080   src2   : S2(read);
 4081   dst    : S5(write);
 4082   DECODE : ID;
 4083   FPU    : S5;
 4084 %}
 4085 
 4086 pipe_class fp_sqrt_s(fRegF dst, fRegF src)
 4087 %{
 4088   single_instruction;
 4089   src    : S1(read);
 4090   dst    : S5(write);
 4091   DECODE : ID;
 4092   FPU    : S5;
 4093 %}
 4094 
 4095 pipe_class fp_sqrt_d(fRegD dst, fRegD src)
 4096 %{
 4097   single_instruction;
 4098   src    : S1(read);
 4099   dst    : S5(write);
 4100   DECODE : ID;
 4101   FPU    : S5;
 4102 %}
 4103 
 4104 pipe_class fp_load_constant_s(fRegF dst)
 4105 %{
 4106   single_instruction;
 4107   dst    : S5(write);
 4108   DECODE : ID;
 4109   FPU    : S5;
 4110 %}
 4111 
 4112 pipe_class fp_load_constant_d(fRegD dst)
 4113 %{
 4114   single_instruction;
 4115   dst    : S5(write);
 4116   DECODE : ID;
 4117   FPU    : S5;
 4118 %}
 4119 
 4120 pipe_class fp_load_mem_s(fRegF dst, memory mem)
 4121 %{
 4122   single_instruction;
 4123   mem    : S1(read);
 4124   dst    : S5(write);
 4125   DECODE : ID;
 4126   LDST   : MEM;
 4127 %}
 4128 
 4129 pipe_class fp_load_mem_d(fRegD dst, memory mem)
 4130 %{
 4131   single_instruction;
 4132   mem    : S1(read);
 4133   dst    : S5(write);
 4134   DECODE : ID;
 4135   LDST   : MEM;
 4136 %}
 4137 
 4138 pipe_class fp_store_reg_s(fRegF src, memory mem)
 4139 %{
 4140   single_instruction;
 4141   src    : S1(read);
 4142   mem    : S5(write);
 4143   DECODE : ID;
 4144   LDST   : MEM;
 4145 %}
 4146 
 4147 pipe_class fp_store_reg_d(fRegD src, memory mem)
 4148 %{
 4149   single_instruction;
 4150   src    : S1(read);
 4151   mem    : S5(write);
 4152   DECODE : ID;
 4153   LDST   : MEM;
 4154 %}
 4155 
 4156 //------- Integer ALU operations --------------------------
 4157 
 4158 // Integer ALU reg-reg operation
// Operands needed in ID, result generated in EX
 4160 // E.g.  ADD   Rd, Rs1, Rs2
 4161 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4162 %{
 4163   single_instruction;
 4164   dst    : EX(write);
 4165   src1   : ID(read);
 4166   src2   : ID(read);
 4167   DECODE : ID;
 4168   ALU    : EX;
 4169 %}
 4170 
 4171 // Integer ALU reg operation with constant shift
 4172 // E.g. SLLI    Rd, Rs1, #shift
 4173 pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
 4174 %{
 4175   single_instruction;
 4176   dst    : EX(write);
 4177   src1   : ID(read);
 4178   DECODE : ID;
 4179   ALU    : EX;
 4180 %}
 4181 
 4182 // Integer ALU reg-reg operation with variable shift
 4183 // both operands must be available in ID
 4184 // E.g. SLL   Rd, Rs1, Rs2
 4185 pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
 4186 %{
 4187   single_instruction;
 4188   dst    : EX(write);
 4189   src1   : ID(read);
 4190   src2   : ID(read);
 4191   DECODE : ID;
 4192   ALU    : EX;
 4193 %}
 4194 
 4195 // Integer ALU reg operation
 4196 // E.g. NEG   Rd, Rs2
 4197 pipe_class ialu_reg(iRegI dst, iRegI src)
 4198 %{
 4199   single_instruction;
 4200   dst    : EX(write);
 4201   src    : ID(read);
 4202   DECODE : ID;
 4203   ALU    : EX;
 4204 %}
 4205 
 4206 // Integer ALU reg immediate operation
 4207 // E.g. ADDI   Rd, Rs1, #imm
 4208 pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
 4209 %{
 4210   single_instruction;
 4211   dst    : EX(write);
 4212   src1   : ID(read);
 4213   DECODE : ID;
 4214   ALU    : EX;
 4215 %}
 4216 
 4217 // Integer ALU immediate operation (no source operands)
 4218 // E.g. LI    Rd, #imm
 4219 pipe_class ialu_imm(iRegI dst)
 4220 %{
 4221   single_instruction;
 4222   dst    : EX(write);
 4223   DECODE : ID;
 4224   ALU    : EX;
 4225 %}
 4226 
 4227 //------- Multiply pipeline operations --------------------
 4228 
 4229 // Multiply reg-reg
 4230 // E.g. MULW   Rd, Rs1, Rs2
 4231 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4232 %{
 4233   single_instruction;
 4234   dst    : WR(write);
 4235   src1   : ID(read);
 4236   src2   : ID(read);
 4237   DECODE : ID;
 4238   MUL    : WR;
 4239 %}
 4240 
 4241 // E.g. MUL   RD, Rs1, Rs2
 4242 pipe_class lmul_reg_reg(iRegL dst, iRegL src1, iRegL src2)
 4243 %{
 4244   single_instruction;
 4245   fixed_latency(3); // Maximum latency for 64 bit mul
 4246   dst    : WR(write);
 4247   src1   : ID(read);
 4248   src2   : ID(read);
 4249   DECODE : ID;
 4250   MUL    : WR;
 4251 %}
 4252 
 4253 //------- Divide pipeline operations --------------------
 4254 
 4255 // E.g. DIVW   Rd, Rs1, Rs2
 4256 pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
 4257 %{
 4258   single_instruction;
 4259   fixed_latency(8); // Maximum latency for 32 bit divide
 4260   dst    : WR(write);
 4261   src1   : ID(read);
 4262   src2   : ID(read);
 4263   DECODE : ID;
 4264   DIV    : WR;
 4265 %}
 4266 
 4267 // E.g. DIV   RD, Rs1, Rs2
 4268 pipe_class ldiv_reg_reg(iRegL dst, iRegL src1, iRegL src2)
 4269 %{
 4270   single_instruction;
 4271   fixed_latency(16); // Maximum latency for 64 bit divide
 4272   dst    : WR(write);
 4273   src1   : ID(read);
 4274   src2   : ID(read);
 4275   DECODE : ID;
 4276   DIV    : WR;
 4277 %}
 4278 
 4279 //------- Load pipeline operations ------------------------
 4280 
 4281 // Load - prefetch
// E.g.  PREFETCH_W  mem
 4283 pipe_class iload_prefetch(memory mem)
 4284 %{
 4285   single_instruction;
 4286   mem    : ID(read);
 4287   DECODE : ID;
 4288   LDST   : MEM;
 4289 %}
 4290 
 4291 // Load - reg, mem
 4292 // E.g. LA    Rd, mem
 4293 pipe_class iload_reg_mem(iRegI dst, memory mem)
 4294 %{
 4295   single_instruction;
 4296   dst    : WR(write);
 4297   mem    : ID(read);
 4298   DECODE : ID;
 4299   LDST   : MEM;
 4300 %}
 4301 
 4302 // Load - reg, reg
 4303 // E.g. LD    Rd, Rs
 4304 pipe_class iload_reg_reg(iRegI dst, iRegI src)
 4305 %{
 4306   single_instruction;
 4307   dst    : WR(write);
 4308   src    : ID(read);
 4309   DECODE : ID;
 4310   LDST   : MEM;
 4311 %}
 4312 
 4313 //------- Store pipeline operations -----------------------
 4314 
 4315 // Store - zr, mem
 4316 // E.g. SD    zr, mem
 4317 pipe_class istore_mem(memory mem)
 4318 %{
 4319   single_instruction;
 4320   mem    : ID(read);
 4321   DECODE : ID;
 4322   LDST   : MEM;
 4323 %}
 4324 
 4325 // Store - reg, mem
 4326 // E.g. SD    Rs, mem
 4327 pipe_class istore_reg_mem(iRegI src, memory mem)
 4328 %{
 4329   single_instruction;
 4330   mem    : ID(read);
 4331   src    : EX(read);
 4332   DECODE : ID;
 4333   LDST   : MEM;
 4334 %}
 4335 
 4336 // Store - reg, reg
 4337 // E.g. SD    Rs2, Rs1
 4338 pipe_class istore_reg_reg(iRegI dst, iRegI src)
 4339 %{
 4340   single_instruction;
 4341   dst    : ID(read);
 4342   src    : EX(read);
 4343   DECODE : ID;
 4344   LDST   : MEM;
 4345 %}
 4346 
 4347 //------- Control transfer pipeline operations ------------
 4348 
 4349 // Branch
 4350 pipe_class pipe_branch()
 4351 %{
 4352   single_instruction;
 4353   DECODE : ID;
 4354   BRANCH : EX;
 4355 %}
 4356 
 4357 // Branch
 4358 pipe_class pipe_branch_reg(iRegI src)
 4359 %{
 4360   single_instruction;
 4361   src    : ID(read);
 4362   DECODE : ID;
 4363   BRANCH : EX;
 4364 %}
 4365 
 4366 // Compare & Branch
 4367 // E.g. BEQ   Rs1, Rs2, L
 4368 pipe_class pipe_cmp_branch(iRegI src1, iRegI src2)
 4369 %{
 4370   single_instruction;
 4371   src1   : ID(read);
 4372   src2   : ID(read);
 4373   DECODE : ID;
 4374   BRANCH : EX;
 4375 %}
 4376 
 4377 // E.g. BEQZ Rs, L
 4378 pipe_class pipe_cmpz_branch(iRegI src)
 4379 %{
 4380   single_instruction;
 4381   src    : ID(read);
 4382   DECODE : ID;
 4383   BRANCH : EX;
 4384 %}
 4385 
 4386 //------- Synchronisation operations ----------------------
 4387 // Any operation requiring serialization
 4388 // E.g. FENCE/Atomic Ops/Load Acquire/Store Release
 4389 pipe_class pipe_serial()
 4390 %{
 4391   single_instruction;
 4392   force_serialization;
 4393   fixed_latency(16);
 4394   DECODE : ID;
 4395   LDST   : MEM;
 4396 %}
 4397 
 4398 pipe_class pipe_slow()
 4399 %{
 4400   instruction_count(10);
 4401   multiple_bundles;
 4402   force_serialization;
 4403   fixed_latency(16);
 4404   DECODE : ID;
 4405   LDST   : MEM;
 4406 %}
 4407 
 4408 // The real do-nothing guy
 4409 pipe_class real_empty()
 4410 %{
  instruction_count(0);
 4412 %}
 4413 
 4414 // Empty pipeline class
 4415 pipe_class pipe_class_empty()
 4416 %{
 4417   single_instruction;
 4418   fixed_latency(0);
 4419 %}
 4420 
 4421 // Default pipeline class.
 4422 pipe_class pipe_class_default()
 4423 %{
 4424   single_instruction;
 4425   fixed_latency(2);
 4426 %}
 4427 
 4428 // Pipeline class for compares.
 4429 pipe_class pipe_class_compare()
 4430 %{
 4431   single_instruction;
 4432   fixed_latency(16);
 4433 %}
 4434 
 4435 // Pipeline class for memory operations.
 4436 pipe_class pipe_class_memory()
 4437 %{
 4438   single_instruction;
 4439   fixed_latency(16);
 4440 %}
 4441 
 4442 // Pipeline class for call.
 4443 pipe_class pipe_class_call()
 4444 %{
 4445   single_instruction;
 4446   fixed_latency(100);
 4447 %}
 4448 
 4449 // Define the class for the Nop node.
 4450 define %{
 4451    MachNop = pipe_class_empty;
 4452 %}
 4453 %}
 4454 //----------INSTRUCTIONS-------------------------------------------------------
 4455 //
 4456 // match      -- States which machine-independent subtree may be replaced
 4457 //               by this instruction.
 4458 // ins_cost   -- The estimated cost of this instruction is used by instruction
 4459 //               selection to identify a minimum cost tree of machine
 4460 //               instructions that matches a tree of machine-independent
 4461 //               instructions.
 4462 // format     -- A string providing the disassembly for this instruction.
 4463 //               The value of an instruction's operand may be inserted
 4464 //               by referring to it with a '$' prefix.
 4465 // opcode     -- Three instruction opcodes may be provided.  These are referred
 4466 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 4468 //               indicate the type of machine instruction, while secondary
 4469 //               and tertiary are often used for prefix options or addressing
 4470 //               modes.
 4471 // ins_encode -- A list of encode classes with parameters. The encode class
 4472 //               name must have been defined in an 'enc_class' specification
 4473 //               in the encode section of the architecture description.
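//
// E.g. (illustrative only): in loadB below, match(Set dst (LoadB mem)) states
// that the rule replaces an 8-bit signed load subtree, ins_cost(LOAD_COST)
// feeds instruction selection, the format string is used for disassembly
// output, and the ins_encode block emits the single lb instruction.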
 4474 
 4475 // ============================================================================
 4476 // Memory (Load/Store) Instructions
 4477 
 4478 // Load Instructions
 4479 
 4480 // Load Byte (8 bit signed)
 4481 instruct loadB(iRegINoSp dst, memory mem)
 4482 %{
 4483   match(Set dst (LoadB mem));
 4484 
 4485   ins_cost(LOAD_COST);
 4486   format %{ "lb  $dst, $mem\t# byte, #@loadB" %}
 4487 
 4488   ins_encode %{
 4489     __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4490   %}
 4491 
 4492   ins_pipe(iload_reg_mem);
 4493 %}
 4494 
 4495 // Load Byte (8 bit signed) into long
 4496 instruct loadB2L(iRegLNoSp dst, memory mem)
 4497 %{
 4498   match(Set dst (ConvI2L (LoadB mem)));
 4499 
 4500   ins_cost(LOAD_COST);
 4501   format %{ "lb  $dst, $mem\t# byte, #@loadB2L" %}
 4502 
 4503   ins_encode %{
 4504     __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4505   %}
 4506 
 4507   ins_pipe(iload_reg_mem);
 4508 %}
 4509 
 4510 // Load Byte (8 bit unsigned)
 4511 instruct loadUB(iRegINoSp dst, memory mem)
 4512 %{
 4513   match(Set dst (LoadUB mem));
 4514 
 4515   ins_cost(LOAD_COST);
 4516   format %{ "lbu  $dst, $mem\t# byte, #@loadUB" %}
 4517 
 4518   ins_encode %{
 4519     __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4520   %}
 4521 
 4522   ins_pipe(iload_reg_mem);
 4523 %}
 4524 
 4525 // Load Byte (8 bit unsigned) into long
 4526 instruct loadUB2L(iRegLNoSp dst, memory mem)
 4527 %{
 4528   match(Set dst (ConvI2L (LoadUB mem)));
 4529 
 4530   ins_cost(LOAD_COST);
 4531   format %{ "lbu  $dst, $mem\t# byte, #@loadUB2L" %}
 4532 
 4533   ins_encode %{
 4534     __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4535   %}
 4536 
 4537   ins_pipe(iload_reg_mem);
 4538 %}
 4539 
 4540 // Load Short (16 bit signed)
 4541 instruct loadS(iRegINoSp dst, memory mem)
 4542 %{
 4543   match(Set dst (LoadS mem));
 4544 
 4545   ins_cost(LOAD_COST);
 4546   format %{ "lh  $dst, $mem\t# short, #@loadS" %}
 4547 
 4548   ins_encode %{
 4549     __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4550   %}
 4551 
 4552   ins_pipe(iload_reg_mem);
 4553 %}
 4554 
 4555 // Load Short (16 bit signed) into long
 4556 instruct loadS2L(iRegLNoSp dst, memory mem)
 4557 %{
 4558   match(Set dst (ConvI2L (LoadS mem)));
 4559 
 4560   ins_cost(LOAD_COST);
 4561   format %{ "lh  $dst, $mem\t# short, #@loadS2L" %}
 4562 
 4563   ins_encode %{
 4564     __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4565   %}
 4566 
 4567   ins_pipe(iload_reg_mem);
 4568 %}
 4569 
 4570 // Load Char (16 bit unsigned)
 4571 instruct loadUS(iRegINoSp dst, memory mem)
 4572 %{
 4573   match(Set dst (LoadUS mem));
 4574 
 4575   ins_cost(LOAD_COST);
 4576   format %{ "lhu  $dst, $mem\t# short, #@loadUS" %}
 4577 
 4578   ins_encode %{
 4579     __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4580   %}
 4581 
 4582   ins_pipe(iload_reg_mem);
 4583 %}
 4584 
 4585 // Load Short/Char (16 bit unsigned) into long
 4586 instruct loadUS2L(iRegLNoSp dst, memory mem)
 4587 %{
 4588   match(Set dst (ConvI2L (LoadUS mem)));
 4589 
 4590   ins_cost(LOAD_COST);
 4591   format %{ "lhu  $dst, $mem\t# short, #@loadUS2L" %}
 4592 
 4593   ins_encode %{
 4594     __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4595   %}
 4596 
 4597   ins_pipe(iload_reg_mem);
 4598 %}
 4599 
 4600 // Load Integer (32 bit signed)
 4601 instruct loadI(iRegINoSp dst, memory mem)
 4602 %{
 4603   match(Set dst (LoadI mem));
 4604 
 4605   ins_cost(LOAD_COST);
 4606   format %{ "lw  $dst, $mem\t# int, #@loadI" %}
 4607 
 4608   ins_encode %{
 4609     __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4610   %}
 4611 
 4612   ins_pipe(iload_reg_mem);
 4613 %}
 4614 
 4615 // Load Integer (32 bit signed) into long
 4616 instruct loadI2L(iRegLNoSp dst, memory mem)
 4617 %{
 4618   match(Set dst (ConvI2L (LoadI mem)));
 4619 
 4620   ins_cost(LOAD_COST);
 4621   format %{ "lw  $dst, $mem\t# int, #@loadI2L" %}
 4622 
 4623   ins_encode %{
 4624     __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4625   %}
 4626 
 4627   ins_pipe(iload_reg_mem);
 4628 %}
 4629 
 4630 // Load Integer (32 bit unsigned) into long
 4631 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
 4632 %{
 4633   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
 4634 
 4635   ins_cost(LOAD_COST);
 4636   format %{ "lwu  $dst, $mem\t# int, #@loadUI2L" %}
 4637 
 4638   ins_encode %{
 4639     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4640   %}
 4641 
 4642   ins_pipe(iload_reg_mem);
 4643 %}
 4644 
 4645 // Load Long (64 bit signed)
 4646 instruct loadL(iRegLNoSp dst, memory mem)
 4647 %{
 4648   match(Set dst (LoadL mem));
 4649 
 4650   ins_cost(LOAD_COST);
  format %{ "ld  $dst, $mem\t# long, #@loadL" %}
 4652 
 4653   ins_encode %{
 4654     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4655   %}
 4656 
 4657   ins_pipe(iload_reg_mem);
 4658 %}
 4659 
 4660 // Load Range
 4661 instruct loadRange(iRegINoSp dst, memory mem)
 4662 %{
 4663   match(Set dst (LoadRange mem));
 4664 
 4665   ins_cost(LOAD_COST);
 4666   format %{ "lwu  $dst, $mem\t# range, #@loadRange" %}
 4667 
 4668   ins_encode %{
 4669     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4670   %}
 4671 
 4672   ins_pipe(iload_reg_mem);
 4673 %}
 4674 
 4675 // Load Pointer
 4676 instruct loadP(iRegPNoSp dst, memory mem)
 4677 %{
 4678   match(Set dst (LoadP mem));
 4679   predicate(n->as_Load()->barrier_data() == 0);
 4680 
 4681   ins_cost(LOAD_COST);
 4682   format %{ "ld  $dst, $mem\t# ptr, #@loadP" %}
 4683 
 4684   ins_encode %{
 4685     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4686   %}
 4687 
 4688   ins_pipe(iload_reg_mem);
 4689 %}
 4690 
 4691 // Load Compressed Pointer
 4692 instruct loadN(iRegNNoSp dst, memory mem)
 4693 %{
 4694   predicate(n->as_Load()->barrier_data() == 0);
 4695   match(Set dst (LoadN mem));
 4696 
 4697   ins_cost(LOAD_COST);
 4698   format %{ "lwu  $dst, $mem\t# compressed ptr, #@loadN" %}
 4699 
 4700   ins_encode %{
 4701     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4702   %}
 4703 
 4704   ins_pipe(iload_reg_mem);
 4705 %}
 4706 
 4707 // Load Klass Pointer
 4708 instruct loadKlass(iRegPNoSp dst, memory mem)
 4709 %{
 4710   match(Set dst (LoadKlass mem));
 4711 
 4712   ins_cost(LOAD_COST);
 4713   format %{ "ld  $dst, $mem\t# class, #@loadKlass" %}
 4714 
 4715   ins_encode %{
 4716     __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4717   %}
 4718 
 4719   ins_pipe(iload_reg_mem);
 4720 %}
 4721 
 4722 // Load Narrow Klass Pointer
 4723 instruct loadNKlass(iRegNNoSp dst, memory mem)
 4724 %{
 4725   predicate(!UseCompactObjectHeaders);
 4726   match(Set dst (LoadNKlass mem));
 4727 
 4728   ins_cost(LOAD_COST);
 4729   format %{ "lwu  $dst, $mem\t# compressed class ptr, #@loadNKlass" %}
 4730 
 4731   ins_encode %{
 4732     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4733   %}
 4734 
 4735   ins_pipe(iload_reg_mem);
 4736 %}
 4737 
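// With compact object headers the narrow klass pointer lives in the object's
// mark word rather than in a separate klass field, so it is read with a 32-bit
// load and then shifted right by markWord::klass_shift_at_offset to extract it.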
 4738 instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory mem)
 4739 %{
 4740   predicate(UseCompactObjectHeaders);
 4741   match(Set dst (LoadNKlass mem));
 4742 
 4743   ins_cost(LOAD_COST);
 4744   format %{
 4745     "lwu  $dst, $mem\t# compressed klass ptr, shifted\n\t"
 4746     "srli $dst, $dst, markWord::klass_shift_at_offset"
 4747   %}
 4748 
 4749   ins_encode %{
 4750     __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4751     __ srli(as_Register($dst$$reg), as_Register($dst$$reg), (unsigned) markWord::klass_shift_at_offset);
 4752   %}
 4753 
 4754   ins_pipe(iload_reg_mem);
 4755 %}
 4756 
 4757 // Load Float
 4758 instruct loadF(fRegF dst, memory mem)
 4759 %{
 4760   match(Set dst (LoadF mem));
 4761 
 4762   ins_cost(LOAD_COST);
 4763   format %{ "flw  $dst, $mem\t# float, #@loadF" %}
 4764 
 4765   ins_encode %{
 4766     __ flw(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4767   %}
 4768 
 4769   ins_pipe(fp_load_mem_s);
 4770 %}
 4771 
 4772 // Load Double
 4773 instruct loadD(fRegD dst, memory mem)
 4774 %{
 4775   match(Set dst (LoadD mem));
 4776 
 4777   ins_cost(LOAD_COST);
 4778   format %{ "fld  $dst, $mem\t# double, #@loadD" %}
 4779 
 4780   ins_encode %{
 4781     __ fld(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
 4782   %}
 4783 
 4784   ins_pipe(fp_load_mem_d);
 4785 %}
 4786 
 4787 // Load Int Constant
 4788 instruct loadConI(iRegINoSp dst, immI src)
 4789 %{
 4790   match(Set dst src);
 4791 
 4792   ins_cost(ALU_COST);
 4793   format %{ "mv $dst, $src\t# int, #@loadConI" %}
 4794 
 4795   ins_encode(riscv_enc_mov_imm(dst, src));
 4796 
 4797   ins_pipe(ialu_imm);
 4798 %}
 4799 
 4800 // Load Long Constant
 4801 instruct loadConL(iRegLNoSp dst, immL src)
 4802 %{
 4803   match(Set dst src);
 4804 
 4805   ins_cost(ALU_COST);
 4806   format %{ "mv $dst, $src\t# long, #@loadConL" %}
 4807 
 4808   ins_encode(riscv_enc_mov_imm(dst, src));
 4809 
 4810   ins_pipe(ialu_imm);
 4811 %}
 4812 
 4813 // Load Pointer Constant
 4814 instruct loadConP(iRegPNoSp dst, immP con)
 4815 %{
 4816   match(Set dst con);
 4817 
 4818   ins_cost(ALU_COST);
 4819   format %{ "mv  $dst, $con\t# ptr, #@loadConP" %}
 4820 
 4821   ins_encode(riscv_enc_mov_p(dst, con));
 4822 
 4823   ins_pipe(ialu_imm);
 4824 %}
 4825 
 4826 // Load Null Pointer Constant
 4827 instruct loadConP0(iRegPNoSp dst, immP0 con)
 4828 %{
 4829   match(Set dst con);
 4830 
 4831   ins_cost(ALU_COST);
 4832   format %{ "mv  $dst, $con\t# null pointer, #@loadConP0" %}
 4833 
 4834   ins_encode(riscv_enc_mov_zero(dst));
 4835 
 4836   ins_pipe(ialu_imm);
 4837 %}
 4838 
 4839 // Load Pointer Constant One
 4840 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 4841 %{
 4842   match(Set dst con);
 4843 
 4844   ins_cost(ALU_COST);
 4845   format %{ "mv  $dst, $con\t# load ptr constant one, #@loadConP1" %}
 4846 
 4847   ins_encode(riscv_enc_mov_p1(dst));
 4848 
 4849   ins_pipe(ialu_imm);
 4850 %}
 4851 
 4852 // Load Byte Map Base Constant
 4853 instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
 4854 %{
 4855   match(Set dst con);
 4856   ins_cost(ALU_COST);
 4857   format %{ "mv  $dst, $con\t# Byte Map Base, #@loadByteMapBase" %}
 4858 
 4859   ins_encode(riscv_enc_mov_byte_map_base(dst));
 4860 
 4861   ins_pipe(ialu_imm);
 4862 %}
 4863 
 4864 // Load Narrow Pointer Constant
 4865 instruct loadConN(iRegNNoSp dst, immN con)
 4866 %{
 4867   match(Set dst con);
 4868 
 4869   ins_cost(ALU_COST * 4);
 4870   format %{ "mv  $dst, $con\t# compressed ptr, #@loadConN" %}
 4871 
 4872   ins_encode(riscv_enc_mov_n(dst, con));
 4873 
 4874   ins_pipe(ialu_imm);
 4875 %}
 4876 
 4877 // Load Narrow Null Pointer Constant
 4878 instruct loadConN0(iRegNNoSp dst, immN0 con)
 4879 %{
 4880   match(Set dst con);
 4881 
 4882   ins_cost(ALU_COST);
 4883   format %{ "mv  $dst, $con\t# compressed null pointer, #@loadConN0" %}
 4884 
 4885   ins_encode(riscv_enc_mov_zero(dst));
 4886 
 4887   ins_pipe(ialu_imm);
 4888 %}
 4889 
 4890 // Load Narrow Klass Constant
 4891 instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
 4892 %{
 4893   match(Set dst con);
 4894 
 4895   ins_cost(ALU_COST * 6);
 4896   format %{ "mv  $dst, $con\t# compressed klass ptr, #@loadConNKlass" %}
 4897 
 4898   ins_encode(riscv_enc_mov_nk(dst, con));
 4899 
 4900   ins_pipe(ialu_imm);
 4901 %}
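
// The half, single and double precision constant loads below materialize the
// constant directly with an fli instruction (Zfa) when MacroAssembler reports
// the immediate as directly loadable, and otherwise load it from the constant
// table.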
 4902 
 4903 // Load Half Float Constant
 4904 instruct loadConH(fRegF dst, immH con) %{
 4905   match(Set dst con);
 4906 
 4907   ins_cost(LOAD_COST);
 4908   format %{
 4909     "flh $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConH"
 4910   %}
 4911 
 4912   ins_encode %{
 4913     assert(UseZfh || UseZfhmin, "must");
 4914     if (MacroAssembler::can_hf_imm_load($con$$constant)) {
 4915       __ fli_h(as_FloatRegister($dst$$reg), $con$$constant);
 4916     } else {
 4917       __ flh(as_FloatRegister($dst$$reg), $constantaddress($con));
 4918     }
 4919   %}
 4920 
 4921   ins_pipe(fp_load_constant_s);
 4922 %}
 4923 
 4924 instruct loadConH0(fRegF dst, immH0 con) %{
 4925   match(Set dst con);
 4926 
 4927   ins_cost(XFER_COST);
 4928 
 4929   format %{ "fmv.h.x $dst, zr\t# float, #@loadConH0" %}
 4930 
 4931   ins_encode %{
 4932     assert(UseZfh || UseZfhmin, "must");
 4933     __ fmv_h_x(as_FloatRegister($dst$$reg), zr);
 4934   %}
 4935 
 4936   ins_pipe(fp_load_constant_s);
 4937 %}
 4938 
 4939 // Load Float Constant
 4940 instruct loadConF(fRegF dst, immF con) %{
 4941   match(Set dst con);
 4942 
 4943   ins_cost(LOAD_COST);
 4944   format %{
 4945     "flw $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConF"
 4946   %}
 4947 
 4948   ins_encode %{
 4949     if (MacroAssembler::can_fp_imm_load($con$$constant)) {
 4950       __ fli_s(as_FloatRegister($dst$$reg), $con$$constant);
 4951     } else {
 4952       __ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
 4953     }
 4954   %}
 4955 
 4956   ins_pipe(fp_load_constant_s);
 4957 %}
 4958 
 4959 instruct loadConF0(fRegF dst, immF0 con) %{
 4960   match(Set dst con);
 4961 
 4962   ins_cost(XFER_COST);
 4963 
 4964   format %{ "fmv.w.x $dst, zr\t# float, #@loadConF0" %}
 4965 
 4966   ins_encode %{
 4967     __ fmv_w_x(as_FloatRegister($dst$$reg), zr);
 4968   %}
 4969 
 4970   ins_pipe(fp_load_constant_s);
 4971 %}
 4972 
 4973 // Load Double Constant
 4974 instruct loadConD(fRegD dst, immD con) %{
 4975   match(Set dst con);
 4976 
 4977   ins_cost(LOAD_COST);
 4978   format %{
 4979     "fld $dst, [$constantaddress]\t# load from constant table: double=$con, #@loadConD"
 4980   %}
 4981 
 4982   ins_encode %{
 4983     if (MacroAssembler::can_dp_imm_load($con$$constant)) {
 4984       __ fli_d(as_FloatRegister($dst$$reg), $con$$constant);
 4985     } else {
 4986       __ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
 4987     }
 4988   %}
 4989 
 4990   ins_pipe(fp_load_constant_d);
 4991 %}
 4992 
 4993 instruct loadConD0(fRegD dst, immD0 con) %{
 4994   match(Set dst con);
 4995 
 4996   ins_cost(XFER_COST);
 4997 
 4998   format %{ "fmv.d.x $dst, zr\t# double, #@loadConD0" %}
 4999 
 5000   ins_encode %{
 5001     __ fmv_d_x(as_FloatRegister($dst$$reg), zr);
 5002   %}
 5003 
 5004   ins_pipe(fp_load_constant_d);
 5005 %}
 5006 
 5007 // Store Byte
 5008 instruct storeB(iRegIorL2I src, memory mem)
 5009 %{
 5010   match(Set mem (StoreB mem src));
 5011 
 5012   ins_cost(STORE_COST);
 5013   format %{ "sb  $src, $mem\t# byte, #@storeB" %}
 5014 
 5015   ins_encode %{
 5016     __ sb(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5017   %}
 5018 
 5019   ins_pipe(istore_reg_mem);
 5020 %}
 5021 
 5022 instruct storeimmB0(immI0 zero, memory mem)
 5023 %{
 5024   match(Set mem (StoreB mem zero));
 5025 
 5026   ins_cost(STORE_COST);
 5027   format %{ "sb zr, $mem\t# byte, #@storeimmB0" %}
 5028 
 5029   ins_encode %{
 5030     __ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
 5031   %}
 5032 
 5033   ins_pipe(istore_mem);
 5034 %}
 5035 
 5036 // Store Char/Short
 5037 instruct storeC(iRegIorL2I src, memory mem)
 5038 %{
 5039   match(Set mem (StoreC mem src));
 5040 
 5041   ins_cost(STORE_COST);
 5042   format %{ "sh  $src, $mem\t# short, #@storeC" %}
 5043 
 5044   ins_encode %{
 5045     __ sh(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5046   %}
 5047 
 5048   ins_pipe(istore_reg_mem);
 5049 %}
 5050 
 5051 instruct storeimmC0(immI0 zero, memory mem)
 5052 %{
 5053   match(Set mem (StoreC mem zero));
 5054 
 5055   ins_cost(STORE_COST);
 5056   format %{ "sh  zr, $mem\t# short, #@storeimmC0" %}
 5057 
 5058   ins_encode %{
 5059     __ sh(zr, Address(as_Register($mem$$base), $mem$$disp));
 5060   %}
 5061 
 5062   ins_pipe(istore_mem);
 5063 %}
 5064 
 5065 // Store Integer
 5066 instruct storeI(iRegIorL2I src, memory mem)
 5067 %{
  match(Set mem (StoreI mem src));
 5069 
 5070   ins_cost(STORE_COST);
 5071   format %{ "sw  $src, $mem\t# int, #@storeI" %}
 5072 
 5073   ins_encode %{
 5074     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5075   %}
 5076 
 5077   ins_pipe(istore_reg_mem);
 5078 %}
 5079 
 5080 instruct storeimmI0(immI0 zero, memory mem)
 5081 %{
  match(Set mem (StoreI mem zero));
 5083 
 5084   ins_cost(STORE_COST);
 5085   format %{ "sw  zr, $mem\t# int, #@storeimmI0" %}
 5086 
 5087   ins_encode %{
 5088     __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
 5089   %}
 5090 
 5091   ins_pipe(istore_mem);
 5092 %}
 5093 
 5094 // Store Long (64 bit signed)
 5095 instruct storeL(iRegL src, memory mem)
 5096 %{
 5097   match(Set mem (StoreL mem src));
 5098 
 5099   ins_cost(STORE_COST);
 5100   format %{ "sd  $src, $mem\t# long, #@storeL" %}
 5101 
 5102   ins_encode %{
 5103     __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5104   %}
 5105 
 5106   ins_pipe(istore_reg_mem);
 5107 %}
 5108 
// Store Long Zero (64 bit signed)
 5110 instruct storeimmL0(immL0 zero, memory mem)
 5111 %{
 5112   match(Set mem (StoreL mem zero));
 5113 
 5114   ins_cost(STORE_COST);
 5115   format %{ "sd  zr, $mem\t# long, #@storeimmL0" %}
 5116 
 5117   ins_encode %{
 5118     __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
 5119   %}
 5120 
 5121   ins_pipe(istore_mem);
 5122 %}
 5123 
 5124 // Store Pointer
 5125 instruct storeP(iRegP src, memory mem)
 5126 %{
 5127   match(Set mem (StoreP mem src));
 5128   predicate(n->as_Store()->barrier_data() == 0);
 5129 
 5130   ins_cost(STORE_COST);
 5131   format %{ "sd  $src, $mem\t# ptr, #@storeP" %}
 5132 
 5133   ins_encode %{
 5134     __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5135   %}
 5136 
 5137   ins_pipe(istore_reg_mem);
 5138 %}
 5139 
// Store Null Pointer
 5141 instruct storeimmP0(immP0 zero, memory mem)
 5142 %{
 5143   match(Set mem (StoreP mem zero));
 5144   predicate(n->as_Store()->barrier_data() == 0);
 5145 
 5146   ins_cost(STORE_COST);
 5147   format %{ "sd zr, $mem\t# ptr, #@storeimmP0" %}
 5148 
 5149   ins_encode %{
 5150     __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
 5151   %}
 5152 
 5153   ins_pipe(istore_mem);
 5154 %}
 5155 
 5156 // Store Compressed Pointer
 5157 instruct storeN(iRegN src, memory mem)
 5158 %{
 5159   predicate(n->as_Store()->barrier_data() == 0);
 5160   match(Set mem (StoreN mem src));
 5161 
 5162   ins_cost(STORE_COST);
 5163   format %{ "sw  $src, $mem\t# compressed ptr, #@storeN" %}
 5164 
 5165   ins_encode %{
 5166     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5167   %}
 5168 
 5169   ins_pipe(istore_reg_mem);
 5170 %}
 5171 
 5172 instruct storeImmN0(immN0 zero, memory mem)
 5173 %{
 5174   predicate(n->as_Store()->barrier_data() == 0);
 5175   match(Set mem (StoreN mem zero));
 5176 
 5177   ins_cost(STORE_COST);
 5178   format %{ "sw  zr, $mem\t# compressed ptr, #@storeImmN0" %}
 5179 
 5180   ins_encode %{
 5181     __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
 5182   %}
 5183 
 5184   ins_pipe(istore_reg_mem);
 5185 %}
 5186 
 5187 // Store Float
 5188 instruct storeF(fRegF src, memory mem)
 5189 %{
 5190   match(Set mem (StoreF mem src));
 5191 
 5192   ins_cost(STORE_COST);
 5193   format %{ "fsw  $src, $mem\t# float, #@storeF" %}
 5194 
 5195   ins_encode %{
 5196     __ fsw(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5197   %}
 5198 
 5199   ins_pipe(fp_store_reg_s);
 5200 %}
 5201 
 5202 // Store Double
 5203 instruct storeD(fRegD src, memory mem)
 5204 %{
 5205   match(Set mem (StoreD mem src));
 5206 
 5207   ins_cost(STORE_COST);
 5208   format %{ "fsd  $src, $mem\t# double, #@storeD" %}
 5209 
 5210   ins_encode %{
 5211     __ fsd(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5212   %}
 5213 
 5214   ins_pipe(fp_store_reg_d);
 5215 %}
 5216 
 5217 // Store Compressed Klass Pointer
 5218 instruct storeNKlass(iRegN src, memory mem)
 5219 %{
 5220   match(Set mem (StoreNKlass mem src));
 5221 
 5222   ins_cost(STORE_COST);
 5223   format %{ "sw  $src, $mem\t# compressed klass ptr, #@storeNKlass" %}
 5224 
 5225   ins_encode %{
 5226     __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
 5227   %}
 5228 
 5229   ins_pipe(istore_reg_mem);
 5230 %}
 5231 
 5232 // ============================================================================
 5233 // Prefetch instructions
 5234 // Must be safe to execute with invalid address (cannot fault).
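// Note on the encoding below: Zicbop encodes prefetch.{i,r,w} in the ORI
// opcode space with imm[4:0] selecting the operation, so only displacements
// that are multiples of 32 and fit in a signed 12-bit immediate can be
// encoded directly; otherwise the address is first materialized in t0 and
// the prefetch is issued at offset 0.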
 5235 
 5236 instruct prefetchalloc( memory mem ) %{
 5237   predicate(UseZicbop);
 5238   match(PrefetchAllocation mem);
 5239 
 5240   ins_cost(ALU_COST * 1);
 5241   format %{ "prefetch_w $mem\t# Prefetch for write" %}
 5242 
 5243   ins_encode %{
 5244     if (Assembler::is_simm12($mem$$disp)) {
 5245       if (($mem$$disp & 0x1f) == 0) {
 5246         __ prefetch_w(as_Register($mem$$base), $mem$$disp);
 5247       } else {
 5248         __ addi(t0, as_Register($mem$$base), $mem$$disp);
 5249         __ prefetch_w(t0, 0);
 5250       }
 5251     } else {
 5252       __ mv(t0, $mem$$disp);
 5253       __ add(t0, as_Register($mem$$base), t0);
 5254       __ prefetch_w(t0, 0);
 5255     }
 5256   %}
 5257 
 5258   ins_pipe(iload_prefetch);
 5259 %}
 5260 
 5261 // ============================================================================
 5262 // Atomic operation instructions
 5263 //
 5264 
 5265 // standard CompareAndSwapX when we are using barriers
 5266 // these have higher priority than the rules selected by a predicate
 5267 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5268                          iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5269 %{
 5270   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 5271 
 5272   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4);
 5273 
 5274   effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
 5275 
 5276   format %{
 5277     "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
 5278     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB"
 5279   %}
 5280 
 5281   ins_encode %{
 5282     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5283                             Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
 5284                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5285   %}
 5286 
 5287   ins_pipe(pipe_slow);
 5288 %}
 5289 
 5290 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5291                          iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5292 %{
 5293   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 5294 
 5295   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4);
 5296 
 5297   effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
 5298 
 5299   format %{
 5300     "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
 5301     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS"
 5302   %}
 5303 
 5304   ins_encode %{
 5305     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5306                             Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
 5307                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5308   %}
 5309 
 5310   ins_pipe(pipe_slow);
 5311 %}
 5312 
 5313 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5314 %{
 5315   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 5316 
 5317   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5318 
 5319   format %{
 5320     "cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
 5321     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapI"
 5322   %}
 5323 
 5324   ins_encode(riscv_enc_cmpxchgw(res, mem, oldval, newval));
 5325 
 5326   ins_pipe(pipe_slow);
 5327 %}
 5328 
 5329 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5330 %{
 5331   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 5332 
 5333   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5334 
 5335   format %{
 5336     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
 5337     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL"
 5338   %}
 5339 
 5340   ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval));
 5341 
 5342   ins_pipe(pipe_slow);
 5343 %}
 5344 
 5345 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5346 %{
 5347   predicate(n->as_LoadStore()->barrier_data() == 0);
 5348 
 5349   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 5350 
 5351   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5352 
 5353   format %{
 5354     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
 5355     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP"
 5356   %}
 5357 
 5358   ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval));
 5359 
 5360   ins_pipe(pipe_slow);
 5361 %}
 5362 
 5363 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5364 %{
 5365   predicate(n->as_LoadStore()->barrier_data() == 0);
 5366   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 5367 
 5368   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4);
 5369 
 5370   format %{
 5371     "cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
 5372     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN"
 5373   %}
 5374 
 5375   ins_encode(riscv_enc_cmpxchgn(res, mem, oldval, newval));
 5376 
 5377   ins_pipe(pipe_slow);
 5378 %}
 5379 
 5380 // alternative CompareAndSwapX when we are eliding barriers
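// Roughly speaking, needs_acquiring_load_reserved(n) is true when C2 has
// elided the explicit memory barriers around a volatile CAS, so these
// variants provide the required acquire ordering by setting the aq bit on
// the LR side of the LR/SC sequence instead of relying on separate fences.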
 5381 instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5382                             iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5383 %{
 5384   predicate(needs_acquiring_load_reserved(n));
 5385 
 5386   match(Set res (CompareAndSwapB mem (Binary oldval newval)));
 5387 
 5388   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4);
 5389 
 5390   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5391 
 5392   format %{
 5393     "cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
 5394     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq"
 5395   %}
 5396 
 5397   ins_encode %{
 5398     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5399                             Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
 5400                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5401   %}
 5402 
 5403   ins_pipe(pipe_slow);
 5404 %}
 5405 
 5406 instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5407                             iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5408 %{
 5409   predicate(needs_acquiring_load_reserved(n));
 5410 
 5411   match(Set res (CompareAndSwapS mem (Binary oldval newval)));
 5412 
 5413   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4);
 5414 
 5415   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5416 
 5417   format %{
 5418     "cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
 5419     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq"
 5420   %}
 5421 
 5422   ins_encode %{
 5423     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5424                             Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
 5425                             true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5426   %}
 5427 
 5428   ins_pipe(pipe_slow);
 5429 %}
 5430 
 5431 instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5432 %{
 5433   predicate(needs_acquiring_load_reserved(n));
 5434 
 5435   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 5436 
 5437   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5438 
 5439   format %{
 5440     "cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
 5441     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq"
 5442   %}
 5443 
 5444   ins_encode(riscv_enc_cmpxchgw_acq(res, mem, oldval, newval));
 5445 
 5446   ins_pipe(pipe_slow);
 5447 %}
 5448 
 5449 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5450 %{
 5451   predicate(needs_acquiring_load_reserved(n));
 5452 
 5453   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 5454 
 5455   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5456 
 5457   format %{
 5458     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
 5459     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq"
 5460   %}
 5461 
 5462   ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval));
 5463 
 5464   ins_pipe(pipe_slow);
 5465 %}
 5466 
 5467 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5468 %{
 5469   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 5470 
 5471   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 5472 
 5473   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4);
 5474 
 5475   format %{
 5476     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
 5477     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq"
 5478   %}
 5479 
 5480   ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval));
 5481 
 5482   ins_pipe(pipe_slow);
 5483 %}
 5484 
 5485 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5486 %{
 5487   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5488 
 5489   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 5490 
 5491   ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4);
 5492 
 5493   format %{
 5494     "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
 5495     "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq"
 5496   %}
 5497 
 5498   ins_encode(riscv_enc_cmpxchgn_acq(res, mem, oldval, newval));
 5499 
 5500   ins_pipe(pipe_slow);
 5501 %}
 5502 
 5503 // Sundry CAS operations.  Note that release is always true,
 5504 // regardless of the memory ordering of the CAS.  This is because we
 5505 // need the volatile case to be sequentially consistent but there is
 5506 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 5507 // can't check the type of memory ordering here, so we always emit a
 5508 // sc_d(w) with rl bit set.
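//
// Illustrative sketch only (not necessarily the exact
// MacroAssembler::cmpxchg expansion) of the word-sized retry loop behind the
// encodings below; addr/oldval/newval/res/tmp are placeholder names:
//
//   loop: lr.w      res, (addr)          // lr.w.aq in the *Acq variants
//         bne       res, oldval, done
//         sc.w.rl   tmp, newval, (addr)  // rl bit always set, see above
//         bnez      tmp, loop
//   done: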
 5509 instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5510                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5511 %{
 5512   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5513 
 5514   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5);
 5515 
 5516   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5517 
 5518   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB"
 5520   %}
 5521 
 5522   ins_encode %{
 5523     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5524                             /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5525                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5526   %}
 5527 
 5528   ins_pipe(pipe_slow);
 5529 %}
 5530 
 5531 instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5532                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5533 %{
 5534   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5535 
 5536   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6);
 5537 
 5538   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5539 
 5540   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS"
 5542   %}
 5543 
 5544   ins_encode %{
 5545     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5546                             /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5547                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5548   %}
 5549 
 5550   ins_pipe(pipe_slow);
 5551 %}
 5552 
 5553 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5554 %{
 5555   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 5556 
 5557   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5558 
 5559   effect(TEMP_DEF res);
 5560 
 5561   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI"
 5563   %}
 5564 
 5565   ins_encode %{
 5566     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5567                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5568   %}
 5569 
 5570   ins_pipe(pipe_slow);
 5571 %}
 5572 
 5573 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
 5574 %{
 5575   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 5576 
 5577   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5578 
 5579   effect(TEMP_DEF res);
 5580 
 5581   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL"
 5583   %}
 5584 
 5585   ins_encode %{
 5586     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5587                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5588   %}
 5589 
 5590   ins_pipe(pipe_slow);
 5591 %}
 5592 
 5593 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
 5594 %{
 5595   predicate(n->as_LoadStore()->barrier_data() == 0);
 5596   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 5597 
 5598   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 3);
 5599 
 5600   effect(TEMP_DEF res);
 5601 
 5602   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN"
 5604   %}
 5605 
 5606   ins_encode %{
 5607     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5608                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5609   %}
 5610 
 5611   ins_pipe(pipe_slow);
 5612 %}
 5613 
 5614 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
 5615 %{
 5616   predicate(n->as_LoadStore()->barrier_data() == 0);
 5617   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 5618 
 5619   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5620 
 5621   effect(TEMP_DEF res);
 5622 
 5623   format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP"
 5625   %}
 5626 
 5627   ins_encode %{
 5628     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5629                /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5630   %}
 5631 
 5632   ins_pipe(pipe_slow);
 5633 %}
 5634 
 5635 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5636                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5637 %{
 5638   predicate(needs_acquiring_load_reserved(n));
 5639 
 5640   match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
 5641 
 5642   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5);
 5643 
 5644   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5645 
 5646   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq"
 5648   %}
 5649 
 5650   ins_encode %{
 5651     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5652                             /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5653                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5654   %}
 5655 
 5656   ins_pipe(pipe_slow);
 5657 %}
 5658 
 5659 instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5660                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5661 %{
 5662   predicate(needs_acquiring_load_reserved(n));
 5663 
 5664   match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
 5665 
 5666   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6);
 5667 
 5668   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5669 
 5670   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq"
 5672   %}
 5673 
 5674   ins_encode %{
 5675     __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5676                             /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5677                             /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5678   %}
 5679 
 5680   ins_pipe(pipe_slow);
 5681 %}
 5682 
 5683 instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5684 %{
 5685   predicate(needs_acquiring_load_reserved(n));
 5686 
 5687   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
 5688 
 5689   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5690 
 5691   effect(TEMP_DEF res);
 5692 
 5693   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq"
 5695   %}
 5696 
 5697   ins_encode %{
 5698     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5699                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5700   %}
 5701 
 5702   ins_pipe(pipe_slow);
 5703 %}
 5704 
 5705 instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
 5706 %{
 5707   predicate(needs_acquiring_load_reserved(n));
 5708 
 5709   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 5710 
 5711   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5712 
 5713   effect(TEMP_DEF res);
 5714 
 5715   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq"
 5717   %}
 5718 
 5719   ins_encode %{
 5720     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5721                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5722   %}
 5723 
 5724   ins_pipe(pipe_slow);
 5725 %}
 5726 
 5727 instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
 5728 %{
 5729   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5730 
 5731   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 5732 
 5733   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5734 
 5735   effect(TEMP_DEF res);
 5736 
 5737   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq"
 5739   %}
 5740 
 5741   ins_encode %{
 5742     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5743                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5744   %}
 5745 
 5746   ins_pipe(pipe_slow);
 5747 %}
 5748 
 5749 instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
 5750 %{
 5751   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 5752 
 5753   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 5754 
 5755   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST);
 5756 
 5757   effect(TEMP_DEF res);
 5758 
 5759   format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq"
 5761   %}
 5762 
 5763   ins_encode %{
 5764     __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5765                /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5766   %}
 5767 
 5768   ins_pipe(pipe_slow);
 5769 %}
 5770 
 5771 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5772                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5773 %{
 5774   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 5775 
 5776   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6);
 5777 
 5778   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5779 
 5780   format %{
 5781     "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5782     "# $res == 1 when success, #@weakCompareAndSwapB"
 5783   %}
 5784 
 5785   ins_encode %{
 5786     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5787                                  /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5788                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5789   %}
 5790 
 5791   ins_pipe(pipe_slow);
 5792 %}
 5793 
 5794 instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5795                              iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5796 %{
 5797   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 5798 
 5799   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7);
 5800 
 5801   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5802 
 5803   format %{
 5804     "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5805     "# $res == 1 when success, #@weakCompareAndSwapS"
 5806   %}
 5807 
 5808   ins_encode %{
 5809     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5810                                  /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
 5811                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5812   %}
 5813 
 5814   ins_pipe(pipe_slow);
 5815 %}
 5816 
 5817 instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5818 %{
 5819   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 5820 
 5821   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5822 
 5823   format %{
 5824     "weak_cmpxchg $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5825     "# $res == 1 when success, #@weakCompareAndSwapI"
 5826   %}
 5827 
 5828   ins_encode %{
 5829     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5830                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5831   %}
 5832 
 5833   ins_pipe(pipe_slow);
 5834 %}
 5835 
 5836 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5837 %{
 5838   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 5839 
 5840   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5841 
 5842   format %{
 5843     "weak_cmpxchg $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5844     "# $res == 1 when success, #@weakCompareAndSwapL"
 5845   %}
 5846 
 5847   ins_encode %{
 5848     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5849                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5850   %}
 5851 
 5852   ins_pipe(pipe_slow);
 5853 %}
 5854 
 5855 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5856 %{
 5857   predicate(n->as_LoadStore()->barrier_data() == 0);
 5858   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 5859 
 5860   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4);
 5861 
 5862   format %{
 5863     "weak_cmpxchg $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5864     "# $res == 1 when success, #@weakCompareAndSwapN"
 5865   %}
 5866 
 5867   ins_encode %{
 5868     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 5869                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5870   %}
 5871 
 5872   ins_pipe(pipe_slow);
 5873 %}
 5874 
 5875 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 5876 %{
 5877   predicate(n->as_LoadStore()->barrier_data() == 0);
 5878   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 5879 
 5880   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5881 
 5882   format %{
 5883     "weak_cmpxchg $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5884     "# $res == 1 when success, #@weakCompareAndSwapP"
 5885   %}
 5886 
 5887   ins_encode %{
 5888     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5889                     /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
 5890   %}
 5891 
 5892   ins_pipe(pipe_slow);
 5893 %}
 5894 
 5895 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5896                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5897 %{
 5898   predicate(needs_acquiring_load_reserved(n));
 5899 
 5900   match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
 5901 
 5902   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6);
 5903 
 5904   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5905 
 5906   format %{
 5907     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5908     "# $res == 1 when success, #@weakCompareAndSwapBAcq"
 5909   %}
 5910 
 5911   ins_encode %{
 5912     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
 5913                                  /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5914                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5915   %}
 5916 
 5917   ins_pipe(pipe_slow);
 5918 %}
 5919 
 5920 instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
 5921                                 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
 5922 %{
 5923   predicate(needs_acquiring_load_reserved(n));
 5924 
 5925   match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
 5926 
 5927   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7);
 5928 
 5929   effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 5930 
 5931   format %{
 5932     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5933     "# $res == 1 when success, #@weakCompareAndSwapSAcq"
 5934   %}
 5935 
 5936   ins_encode %{
 5937     __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
 5938                                  /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
 5939                                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 5940   %}
 5941 
 5942   ins_pipe(pipe_slow);
 5943 %}
 5944 
 5945 instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
 5946 %{
 5947   predicate(needs_acquiring_load_reserved(n));
 5948 
 5949   match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
 5950 
 5951   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5952 
 5953   format %{
 5954     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5955     "# $res == 1 when success, #@weakCompareAndSwapIAcq"
 5956   %}
 5957 
 5958   ins_encode %{
 5959     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
 5960                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5961   %}
 5962 
 5963   ins_pipe(pipe_slow);
 5964 %}
 5965 
 5966 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
 5967 %{
 5968   predicate(needs_acquiring_load_reserved(n));
 5969 
 5970   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
 5971 
 5972   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 5973 
 5974   format %{
 5975     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5976     "# $res == 1 when success, #@weakCompareAndSwapLAcq"
 5977   %}
 5978 
 5979   ins_encode %{
 5980     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 5981                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 5982   %}
 5983 
 5984   ins_pipe(pipe_slow);
 5985 %}
 5986 
 5987 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
 5988 %{
 5989   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 5990 
 5991   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
 5992 
 5993   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4);
 5994 
 5995   format %{
 5996     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
 5997     "# $res == 1 when success, #@weakCompareAndSwapNAcq"
 5998   %}
 5999 
 6000   ins_encode %{
 6001     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
 6002                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 6003   %}
 6004 
 6005   ins_pipe(pipe_slow);
 6006 %}
 6007 
 6008 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
 6009 %{
 6010   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 6011 
 6012   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
 6013 
 6014   ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2);
 6015 
 6016   format %{
 6017     "weak_cmpxchg_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapPAcq"
 6019   %}
 6020 
 6021   ins_encode %{
 6022     __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
 6023                     /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
 6024   %}
 6025 
 6026   ins_pipe(pipe_slow);
 6027 %}
 6028 
 6029 instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev)
 6030 %{
 6031   match(Set prev (GetAndSetI mem newv));
 6032 
 6033   ins_cost(ALU_COST);
 6034 
 6035   format %{ "atomic_xchgw  $prev, $newv, [$mem]\t#@get_and_setI" %}
 6036 
 6037   ins_encode %{
 6038     __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6039   %}
 6040 
 6041   ins_pipe(pipe_serial);
 6042 %}
 6043 
 6044 instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev)
 6045 %{
 6046   match(Set prev (GetAndSetL mem newv));
 6047 
 6048   ins_cost(ALU_COST);
 6049 
 6050   format %{ "atomic_xchg  $prev, $newv, [$mem]\t#@get_and_setL" %}
 6051 
 6052   ins_encode %{
 6053     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6054   %}
 6055 
 6056   ins_pipe(pipe_serial);
 6057 %}
 6058 
 6059 instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev)
 6060 %{
 6061   predicate(n->as_LoadStore()->barrier_data() == 0);
 6062 
 6063   match(Set prev (GetAndSetN mem newv));
 6064 
 6065   ins_cost(ALU_COST);
 6066 
 6067   format %{ "atomic_xchgwu $prev, $newv, [$mem]\t#@get_and_setN" %}
 6068 
 6069   ins_encode %{
 6070     __ atomic_xchgwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6071   %}
 6072 
 6073   ins_pipe(pipe_serial);
 6074 %}
 6075 
 6076 instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev)
 6077 %{
 6078   predicate(n->as_LoadStore()->barrier_data() == 0);
 6079   match(Set prev (GetAndSetP mem newv));
 6080 
 6081   ins_cost(ALU_COST);
 6082 
 6083   format %{ "atomic_xchg  $prev, $newv, [$mem]\t#@get_and_setP" %}
 6084 
 6085   ins_encode %{
 6086     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6087   %}
 6088 
 6089   ins_pipe(pipe_serial);
 6090 %}
 6091 
 6092 instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev)
 6093 %{
 6094   predicate(needs_acquiring_load_reserved(n));
 6095 
 6096   match(Set prev (GetAndSetI mem newv));
 6097 
 6098   ins_cost(ALU_COST);
 6099 
 6100   format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]\t#@get_and_setIAcq" %}
 6101 
 6102   ins_encode %{
 6103     __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6104   %}
 6105 
 6106   ins_pipe(pipe_serial);
 6107 %}
 6108 
 6109 instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev)
 6110 %{
 6111   predicate(needs_acquiring_load_reserved(n));
 6112 
 6113   match(Set prev (GetAndSetL mem newv));
 6114 
 6115   ins_cost(ALU_COST);
 6116 
 6117   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]\t#@get_and_setLAcq" %}
 6118 
 6119   ins_encode %{
 6120     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6121   %}
 6122 
 6123   ins_pipe(pipe_serial);
 6124 %}
 6125 
 6126 instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev)
 6127 %{
 6128   predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);
 6129 
 6130   match(Set prev (GetAndSetN mem newv));
 6131 
 6132   ins_cost(ALU_COST);
 6133 
 6134   format %{ "atomic_xchgwu_acq $prev, $newv, [$mem]\t#@get_and_setNAcq" %}
 6135 
 6136   ins_encode %{
 6137     __ atomic_xchgalwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6138   %}
 6139 
 6140   ins_pipe(pipe_serial);
 6141 %}
 6142 
 6143 instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev)
 6144 %{
 6145   predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));
 6146 
 6147   match(Set prev (GetAndSetP mem newv));
 6148 
 6149   ins_cost(ALU_COST);
 6150 
 6151   format %{ "atomic_xchg_acq  $prev, $newv, [$mem]\t#@get_and_setPAcq" %}
 6152 
 6153   ins_encode %{
 6154     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
 6155   %}
 6156 
 6157   ins_pipe(pipe_serial);
 6158 %}
 6159 
 6160 instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr)
 6161 %{
 6162   match(Set newval (GetAndAddL mem incr));
 6163 
 6164   ins_cost(ALU_COST);
 6165 
 6166   format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addL" %}
 6167 
 6168   ins_encode %{
 6169     __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6170   %}
 6171 
 6172   ins_pipe(pipe_serial);
 6173 %}
 6174 
 6175 instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr)
 6176 %{
 6177   predicate(n->as_LoadStore()->result_not_used());
 6178 
 6179   match(Set dummy (GetAndAddL mem incr));
 6180 
 6181   ins_cost(ALU_COST);
 6182 
 6183   format %{ "get_and_addL [$mem], $incr\t#@get_and_addL_no_res" %}
 6184 
 6185   ins_encode %{
 6186     __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
 6187   %}
 6188 
 6189   ins_pipe(pipe_serial);
 6190 %}
 6191 
 6192 instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAdd incr)
 6193 %{
 6194   match(Set newval (GetAndAddL mem incr));
 6195 
 6196   ins_cost(ALU_COST);
 6197 
 6198   format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addLi" %}
 6199 
 6200   ins_encode %{
 6201     __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6202   %}
 6203 
 6204   ins_pipe(pipe_serial);
 6205 %}
 6206 
 6207 instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAdd incr)
 6208 %{
 6209   predicate(n->as_LoadStore()->result_not_used());
 6210 
 6211   match(Set dummy (GetAndAddL mem incr));
 6212 
 6213   ins_cost(ALU_COST);
 6214 
 6215   format %{ "get_and_addL [$mem], $incr\t#@get_and_addLi_no_res" %}
 6216 
 6217   ins_encode %{
 6218     __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
 6219   %}
 6220 
 6221   ins_pipe(pipe_serial);
 6222 %}
 6223 
 6224 instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr)
 6225 %{
 6226   match(Set newval (GetAndAddI mem incr));
 6227 
 6228   ins_cost(ALU_COST);
 6229 
 6230   format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addI" %}
 6231 
 6232   ins_encode %{
 6233     __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6234   %}
 6235 
 6236   ins_pipe(pipe_serial);
 6237 %}
 6238 
 6239 instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr)
 6240 %{
 6241   predicate(n->as_LoadStore()->result_not_used());
 6242 
 6243   match(Set dummy (GetAndAddI mem incr));
 6244 
 6245   ins_cost(ALU_COST);
 6246 
 6247   format %{ "get_and_addI [$mem], $incr\t#@get_and_addI_no_res" %}
 6248 
 6249   ins_encode %{
 6250     __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
 6251   %}
 6252 
 6253   ins_pipe(pipe_serial);
 6254 %}
 6255 
 6256 instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAdd incr)
 6257 %{
 6258   match(Set newval (GetAndAddI mem incr));
 6259 
 6260   ins_cost(ALU_COST);
 6261 
 6262   format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addIi" %}
 6263 
 6264   ins_encode %{
 6265     __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6266   %}
 6267 
 6268   ins_pipe(pipe_serial);
 6269 %}
 6270 
 6271 instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAdd incr)
 6272 %{
 6273   predicate(n->as_LoadStore()->result_not_used());
 6274 
 6275   match(Set dummy (GetAndAddI mem incr));
 6276 
 6277   ins_cost(ALU_COST);
 6278 
 6279   format %{ "get_and_addI [$mem], $incr\t#@get_and_addIi_no_res" %}
 6280 
 6281   ins_encode %{
 6282     __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
 6283   %}
 6284 
 6285   ins_pipe(pipe_serial);
 6286 %}
 6287 
 6288 instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr)
 6289 %{
 6290   predicate(needs_acquiring_load_reserved(n));
 6291 
 6292   match(Set newval (GetAndAddL mem incr));
 6293 
 6294   ins_cost(ALU_COST);
 6295 
 6296   format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLAcq" %}
 6297 
 6298   ins_encode %{
 6299     __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6300   %}
 6301 
 6302   ins_pipe(pipe_serial);
 6303 %}
 6304 
 6305 instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
 6306   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6307 
 6308   match(Set dummy (GetAndAddL mem incr));
 6309 
 6310   ins_cost(ALU_COST);
 6311 
 6312   format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addL_no_resAcq" %}
 6313 
 6314   ins_encode %{
 6315     __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
 6316   %}
 6317 
 6318   ins_pipe(pipe_serial);
 6319 %}
 6320 
 6321 instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAdd incr)
 6322 %{
 6323   predicate(needs_acquiring_load_reserved(n));
 6324 
 6325   match(Set newval (GetAndAddL mem incr));
 6326 
 6327   ins_cost(ALU_COST);
 6328 
 6329   format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLiAcq" %}
 6330 
 6331   ins_encode %{
 6332     __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6333   %}
 6334 
 6335   ins_pipe(pipe_serial);
 6336 %}
 6337 
 6338 instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAdd incr)
 6339 %{
 6340   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6341 
 6342   match(Set dummy (GetAndAddL mem incr));
 6343 
 6344   ins_cost(ALU_COST);
 6345 
 6346   format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addLi_no_resAcq" %}
 6347 
 6348   ins_encode %{
 6349     __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
 6350   %}
 6351 
 6352   ins_pipe(pipe_serial);
 6353 %}
 6354 
 6355 instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr)
 6356 %{
 6357   predicate(needs_acquiring_load_reserved(n));
 6358 
 6359   match(Set newval (GetAndAddI mem incr));
 6360 
 6361   ins_cost(ALU_COST);
 6362 
 6363   format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIAcq" %}
 6364 
 6365   ins_encode %{
 6366     __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
 6367   %}
 6368 
 6369   ins_pipe(pipe_serial);
 6370 %}
 6371 
 6372 instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr)
 6373 %{
 6374   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6375 
 6376   match(Set dummy (GetAndAddI mem incr));
 6377 
 6378   ins_cost(ALU_COST);
 6379 
 6380   format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addI_no_resAcq" %}
 6381 
 6382   ins_encode %{
 6383     __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
 6384   %}
 6385 
 6386   ins_pipe(pipe_serial);
 6387 %}
 6388 
 6389 instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAdd incr)
 6390 %{
 6391   predicate(needs_acquiring_load_reserved(n));
 6392 
 6393   match(Set newval (GetAndAddI mem incr));
 6394 
 6395   ins_cost(ALU_COST);
 6396 
 6397   format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIiAcq" %}
 6398 
 6399   ins_encode %{
 6400     __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
 6401   %}
 6402 
 6403   ins_pipe(pipe_serial);
 6404 %}
 6405 
 6406 instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAdd incr)
 6407 %{
 6408   predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));
 6409 
 6410   match(Set dummy (GetAndAddI mem incr));
 6411 
 6412   ins_cost(ALU_COST);
 6413 
 6414   format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addIi_no_resAcq" %}
 6415 
 6416   ins_encode %{
 6417     __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
 6418   %}
 6419 
 6420   ins_pipe(pipe_serial);
 6421 %}
 6422 
 6423 // ============================================================================
 6424 // Arithmetic Instructions
 6425 //
 6426 
 6427 // Integer Addition
 6428 
// TODO
// These currently employ operations which do not set CR and hence are
// not flagged as killing CR, but we would like to isolate the cases
// where we want to set flags from those where we don't. We still need to
// work out how to do that.
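// Note: on RV64, addw/addiw (and subw below) operate on the low 32 bits of
// their operands and sign-extend the 32-bit result into the 64-bit
// destination register, which keeps int results sign-extended in registers.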
 6434 instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6435   match(Set dst (AddI src1 src2));
 6436 
 6437   ins_cost(ALU_COST);
 6438   format %{ "addw  $dst, $src1, $src2\t#@addI_reg_reg" %}
 6439 
 6440   ins_encode %{
 6441     __ addw(as_Register($dst$$reg),
 6442             as_Register($src1$$reg),
 6443             as_Register($src2$$reg));
 6444   %}
 6445 
 6446   ins_pipe(ialu_reg_reg);
 6447 %}
 6448 
 6449 instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAdd src2) %{
 6450   match(Set dst (AddI src1 src2));
 6451 
 6452   ins_cost(ALU_COST);
 6453   format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm" %}
 6454 
 6455   ins_encode %{
 6456     __ addiw(as_Register($dst$$reg),
 6457              as_Register($src1$$reg),
 6458              $src2$$constant);
 6459   %}
 6460 
 6461   ins_pipe(ialu_reg_imm);
 6462 %}
 6463 
 6464 instruct addI_reg_imm_l2i(iRegINoSp dst, iRegL src1, immIAdd src2) %{
 6465   match(Set dst (AddI (ConvL2I src1) src2));
 6466 
 6467   ins_cost(ALU_COST);
 6468   format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm_l2i" %}
 6469 
 6470   ins_encode %{
 6471     __ addiw(as_Register($dst$$reg),
 6472              as_Register($src1$$reg),
 6473              $src2$$constant);
 6474   %}
 6475 
 6476   ins_pipe(ialu_reg_imm);
 6477 %}
 6478 
 6479 // Pointer Addition
 6480 instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
 6481   match(Set dst (AddP src1 src2));
 6482 
 6483   ins_cost(ALU_COST);
 6484   format %{ "add $dst, $src1, $src2\t# ptr, #@addP_reg_reg" %}
 6485 
 6486   ins_encode %{
 6487     __ add(as_Register($dst$$reg),
 6488            as_Register($src1$$reg),
 6489            as_Register($src2$$reg));
 6490   %}
 6491 
 6492   ins_pipe(ialu_reg_reg);
 6493 %}
 6494 
 6495 // If we shift more than 32 bits, we need not convert I2L.
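// For example, with $scale == 32 the low 32 bits of $src end up in bits
// 63..32 of $dst and every bit that the ConvI2L would have produced by sign
// extension is shifted out, so the conversion can be elided.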
 6496 instruct lShiftL_regI_immGE32(iRegLNoSp dst, iRegI src, uimmI6_ge32 scale) %{
 6497   match(Set dst (LShiftL (ConvI2L src) scale));
 6498   ins_cost(ALU_COST);
 6499   format %{ "slli  $dst, $src, $scale & 63\t#@lShiftL_regI_immGE32" %}
 6500 
 6501   ins_encode %{
 6502     __ slli(as_Register($dst$$reg), as_Register($src$$reg), $scale$$constant & 63);
 6503   %}
 6504 
 6505   ins_pipe(ialu_reg_shift);
 6506 %}
 6507 
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand, so that the matcher prefers to fold the offset into the
// addressing mode of the memory access rather than emit a separate addi.
 6511 instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAdd src2) %{
 6512   match(Set dst (AddP src1 src2));
 6513   ins_cost(ALU_COST);
 6514   format %{ "addi  $dst, $src1, $src2\t# ptr, #@addP_reg_imm" %}
 6515 
 6516   ins_encode %{
 6517     __ addi(as_Register($dst$$reg),
 6518             as_Register($src1$$reg),
 6519             $src2$$constant);
 6520   %}
 6521 
 6522   ins_pipe(ialu_reg_imm);
 6523 %}
 6524 
 6525 // Long Addition
 6526 instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6527   match(Set dst (AddL src1 src2));
 6528   ins_cost(ALU_COST);
 6529   format %{ "add  $dst, $src1, $src2\t#@addL_reg_reg" %}
 6530 
 6531   ins_encode %{
 6532     __ add(as_Register($dst$$reg),
 6533            as_Register($src1$$reg),
 6534            as_Register($src2$$reg));
 6535   %}
 6536 
 6537   ins_pipe(ialu_reg_reg);
 6538 %}
 6539 
// No constant pool entries required. Long Immediate Addition.
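// Note (assumption about operand definitions earlier in this file): immLAdd
// and immLSub are expected to accept only constants whose value (or, for the
// subtraction form, whose negation) fits addi's signed 12-bit immediate,
// which is what makes the single addi/subi encodings in the rules below legal.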
 6541 instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 6542   match(Set dst (AddL src1 src2));
 6543   ins_cost(ALU_COST);
 6544   format %{ "addi  $dst, $src1, $src2\t#@addL_reg_imm" %}
 6545 
 6546   ins_encode %{
    // src2 is an immediate, so addi can be used directly
 6548     __ addi(as_Register($dst$$reg),
 6549             as_Register($src1$$reg),
 6550             $src2$$constant);
 6551   %}
 6552 
 6553   ins_pipe(ialu_reg_imm);
 6554 %}
 6555 
 6556 // Integer Subtraction
 6557 instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6558   match(Set dst (SubI src1 src2));
 6559 
 6560   ins_cost(ALU_COST);
 6561   format %{ "subw  $dst, $src1, $src2\t#@subI_reg_reg" %}
 6562 
 6563   ins_encode %{
 6564     __ subw(as_Register($dst$$reg),
 6565             as_Register($src1$$reg),
 6566             as_Register($src2$$reg));
 6567   %}
 6568 
 6569   ins_pipe(ialu_reg_reg);
 6570 %}
 6571 
 6572 // Immediate Subtraction
 6573 instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immISub src2) %{
 6574   match(Set dst (SubI src1 src2));
 6575 
 6576   ins_cost(ALU_COST);
 6577   format %{ "addiw  $dst, $src1, -$src2\t#@subI_reg_imm" %}
 6578 
 6579   ins_encode %{
    // src2 is an immediate; subiw expands to addiw with the negated value
 6581     __ subiw(as_Register($dst$$reg),
 6582              as_Register($src1$$reg),
 6583              $src2$$constant);
 6584   %}
 6585 
 6586   ins_pipe(ialu_reg_imm);
 6587 %}
 6588 
 6589 // Long Subtraction
 6590 instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6591   match(Set dst (SubL src1 src2));
 6592   ins_cost(ALU_COST);
 6593   format %{ "sub  $dst, $src1, $src2\t#@subL_reg_reg" %}
 6594 
 6595   ins_encode %{
 6596     __ sub(as_Register($dst$$reg),
 6597            as_Register($src1$$reg),
 6598            as_Register($src2$$reg));
 6599   %}
 6600 
 6601   ins_pipe(ialu_reg_reg);
 6602 %}
 6603 
 6604 // Long Immediate Subtraction (no constant pool entries required)
 6605 instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLSub src2) %{
 6606   match(Set dst (SubL src1 src2));
 6607   ins_cost(ALU_COST);
 6608   format %{ "addi  $dst, $src1, -$src2\t#@subL_reg_imm" %}
 6609 
 6610   ins_encode %{
 6611     // src2 is an immediate; subi emits an addi with the negated value
 6612     __ subi(as_Register($dst$$reg),
 6613             as_Register($src1$$reg),
 6614             $src2$$constant);
 6615   %}
 6616 
 6617   ins_pipe(ialu_reg_imm);
 6618 %}
 6619 
 6620 // Integer Negation (special case for sub)
 6621 
 6622 instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
 6623   match(Set dst (SubI zero src));
 6624   ins_cost(ALU_COST);
 6625   format %{ "subw  $dst, x0, $src\t# int, #@negI_reg" %}
 6626 
 6627   ins_encode %{
 6628     // negw expands to subw dst, x0, src
 6629     __ negw(as_Register($dst$$reg),
 6630             as_Register($src$$reg));
 6631   %}
 6632 
 6633   ins_pipe(ialu_reg);
 6634 %}
 6635 
 6636 // Long Negation
 6637 
 6638 instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero) %{
 6639   match(Set dst (SubL zero src));
 6640   ins_cost(ALU_COST);
 6641   format %{ "sub  $dst, x0, $src\t# long, #@negL_reg" %}
 6642 
 6643   ins_encode %{
 6644     // neg expands to sub dst, x0, src
 6645     __ neg(as_Register($dst$$reg),
 6646            as_Register($src$$reg));
 6647   %}
 6648 
 6649   ins_pipe(ialu_reg);
 6650 %}
 6651 
 6652 // Integer Multiply
 6653 
 6654 instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6655   match(Set dst (MulI src1 src2));
 6656   ins_cost(IMUL_COST);
 6657   format %{ "mulw  $dst, $src1, $src2\t#@mulI" %}
 6658 
 6659   // 32-bit multiply of two int operands; no separate sign-extension to 64 bits is needed
 6660   ins_encode %{
 6661     // mulw sign-extends the 32-bit result into the upper 32 bits of dst
 6662     __ mulw(as_Register($dst$$reg),
 6663             as_Register($src1$$reg),
 6664             as_Register($src2$$reg));
 6665   %}
 6666 
 6667   ins_pipe(imul_reg_reg);
 6668 %}
 6669 
 6670 // Long Multiply
 6671 
 6672 instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6673   match(Set dst (MulL src1 src2));
 6674   ins_cost(IMUL_COST);
 6675   format %{ "mul  $dst, $src1, $src2\t#@mulL" %}
 6676 
 6677   ins_encode %{
 6678     __ mul(as_Register($dst$$reg),
 6679            as_Register($src1$$reg),
 6680            as_Register($src2$$reg));
 6681   %}
 6682 
 6683   ins_pipe(lmul_reg_reg);
 6684 %}
 6685 
 6686 instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
 6687 %{
 6688   match(Set dst (MulHiL src1 src2));
 6689   ins_cost(IMUL_COST);
 6690   format %{ "mulh  $dst, $src1, $src2\t# mulhi, #@mulHiL_rReg" %}
 6691 
 6692   ins_encode %{
 6693     __ mulh(as_Register($dst$$reg),
 6694             as_Register($src1$$reg),
 6695             as_Register($src2$$reg));
 6696   %}
 6697 
 6698   ins_pipe(lmul_reg_reg);
 6699 %}
 6700 
 6701 instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
 6702 %{
 6703   match(Set dst (UMulHiL src1 src2));
 6704   ins_cost(IMUL_COST);
 6705   format %{ "mulhu  $dst, $src1, $src2\t# umulhi, #@umulHiL_rReg" %}
 6706 
 6707   ins_encode %{
 6708     __ mulhu(as_Register($dst$$reg),
 6709              as_Register($src1$$reg),
 6710              as_Register($src2$$reg));
 6711   %}
 6712 
 6713   ins_pipe(lmul_reg_reg);
 6714 %}
 6715 
 6716 // Integer Divide
 6717 
 6718 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6719   match(Set dst (DivI src1 src2));
 6720   ins_cost(IDIVSI_COST);
 6721   format %{ "divw  $dst, $src1, $src2\t#@divI"%}
 6722 
 6723   ins_encode %{
 6724     __ divw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
 6725   %}
 6726   ins_pipe(idiv_reg_reg);
 6727 %}
 6728 
 6729 instruct UdivI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6730   match(Set dst (UDivI src1 src2));
 6731   ins_cost(IDIVSI_COST);
 6732   format %{ "divuw  $dst, $src1, $src2\t#@UdivI"%}
 6733 
 6734   ins_encode %{
 6735     __ divuw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
 6736   %}
 6737   ins_pipe(idiv_reg_reg);
 6738 %}
 6739 
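      // The rule below matches the idealized idiom (x >> 31) >>> 31, which extracts the sign bit of an
      // int as 0 or 1 and reduces to a single srliw by 31. Illustrative Java (not code from this file):
      //   int sign = (x >> 31) >>> 31;   // 1 if x < 0, otherwise 0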
 6740 instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
 6741   match(Set dst (URShiftI (RShiftI src1 div1) div2));
 6742   ins_cost(ALU_COST);
 6743   format %{ "srliw $dst, $src1, $div1\t# int signExtract, #@signExtract" %}
 6744 
 6745   ins_encode %{
 6746     __ srliw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
 6747   %}
 6748   ins_pipe(ialu_reg_shift);
 6749 %}
 6750 
 6751 // Long Divide
 6752 
 6753 instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6754   match(Set dst (DivL src1 src2));
 6755   ins_cost(IDIVDI_COST);
 6756   format %{ "div  $dst, $src1, $src2\t#@divL" %}
 6757 
 6758   ins_encode %{
 6759     __ div(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
 6760   %}
 6761   ins_pipe(ldiv_reg_reg);
 6762 %}
 6763 
 6764 instruct UdivL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6765   match(Set dst (UDivL src1 src2));
 6766   ins_cost(IDIVDI_COST);
 6767 
 6768   format %{ "divu $dst, $src1, $src2\t#@UdivL" %}
 6769 
 6770   ins_encode %{
 6771     __ divu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
 6772   %}
 6773   ins_pipe(ldiv_reg_reg);
 6774 %}
 6775 
 6776 instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
 6777   match(Set dst (URShiftL (RShiftL src1 div1) div2));
 6778   ins_cost(ALU_COST);
 6779   format %{ "srli $dst, $src1, $div1\t# long signExtract, #@signExtractL" %}
 6780 
 6781   ins_encode %{
 6782     __ srli(as_Register($dst$$reg), as_Register($src1$$reg), 63);
 6783   %}
 6784   ins_pipe(ialu_reg_shift);
 6785 %}
 6786 
 6787 // Integer Remainder
 6788 
 6789 instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6790   match(Set dst (ModI src1 src2));
 6791   ins_cost(IDIVSI_COST);
 6792   format %{ "remw  $dst, $src1, $src2\t#@modI" %}
 6793 
 6794   ins_encode %{
 6795     __ remw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
 6796   %}
 6797   ins_pipe(ialu_reg_reg);
 6798 %}
 6799 
 6800 instruct UmodI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6801   match(Set dst (UModI src1 src2));
 6802   ins_cost(IDIVSI_COST);
 6803   format %{ "remuw  $dst, $src1, $src2\t#@UmodI" %}
 6804 
 6805   ins_encode %{
 6806     __ remuw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
 6807   %}
 6808   ins_pipe(ialu_reg_reg);
 6809 %}
 6810 
 6811 // Long Remainder
 6812 
 6813 instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6814   match(Set dst (ModL src1 src2));
 6815   ins_cost(IDIVDI_COST);
 6816   format %{ "rem  $dst, $src1, $src2\t#@modL" %}
 6817 
 6818   ins_encode %{
 6819     __ rem(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
 6820   %}
 6821   ins_pipe(ialu_reg_reg);
 6822 %}
 6823 
 6824 instruct UmodL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 6825   match(Set dst (UModL src1 src2));
 6826   ins_cost(IDIVDI_COST);
 6827   format %{ "remu  $dst, $src1, $src2\t#@UmodL" %}
 6828 
 6829   ins_encode %{
 6830     __ remu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
 6831   %}
 6832   ins_pipe(ialu_reg_reg);
 6833 %}
 6834 
 6835 // Integer Shifts
 6836 
 6837 // Shift Left Register
 6838 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
 6839 instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6840   match(Set dst (LShiftI src1 src2));
 6841   ins_cost(ALU_COST);
 6842   format %{ "sllw  $dst, $src1, $src2\t#@lShiftI_reg_reg" %}
 6843 
 6844   ins_encode %{
 6845     __ sllw(as_Register($dst$$reg),
 6846             as_Register($src1$$reg),
 6847             as_Register($src2$$reg));
 6848   %}
 6849 
 6850   ins_pipe(ialu_reg_reg_vshift);
 6851 %}
 6852 
 6853 // Shift Left Immediate
 6854 instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6855   match(Set dst (LShiftI src1 src2));
 6856   ins_cost(ALU_COST);
 6857   format %{ "slliw  $dst, $src1, ($src2 & 0x1f)\t#@lShiftI_reg_imm" %}
 6858 
 6859   ins_encode %{
 6860     // the shift amount for slliw is encoded in the
 6861     // lower 5 bits of the I-immediate field
 6862     __ slliw(as_Register($dst$$reg),
 6863              as_Register($src1$$reg),
 6864              (unsigned) $src2$$constant & 0x1f);
 6865   %}
 6866 
 6867   ins_pipe(ialu_reg_shift);
 6868 %}
 6869 
 6870 // Shift Right Logical Register
 6871 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
 6872 instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6873   match(Set dst (URShiftI src1 src2));
 6874   ins_cost(ALU_COST);
 6875   format %{ "srlw  $dst, $src1, $src2\t#@urShiftI_reg_reg" %}
 6876 
 6877   ins_encode %{
 6878     __ srlw(as_Register($dst$$reg),
 6879             as_Register($src1$$reg),
 6880             as_Register($src2$$reg));
 6881   %}
 6882 
 6883   ins_pipe(ialu_reg_reg_vshift);
 6884 %}
 6885 
 6886 // Shift Right Logical Immediate
 6887 instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6888   match(Set dst (URShiftI src1 src2));
 6889   ins_cost(ALU_COST);
 6890   format %{ "srliw  $dst, $src1, ($src2 & 0x1f)\t#@urShiftI_reg_imm" %}
 6891 
 6892   ins_encode %{
 6893     // the shift amount for srliw is encoded in the
 6894     // lower 5 bits of the I-immediate field
 6895     __ srliw(as_Register($dst$$reg),
 6896              as_Register($src1$$reg),
 6897              (unsigned) $src2$$constant & 0x1f);
 6898   %}
 6899 
 6900   ins_pipe(ialu_reg_shift);
 6901 %}
 6902 
 6903 // Shift Right Arithmetic Register
 6904 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
 6905 instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
 6906   match(Set dst (RShiftI src1 src2));
 6907   ins_cost(ALU_COST);
 6908   format %{ "sraw  $dst, $src1, $src2\t#@rShiftI_reg_reg" %}
 6909 
 6910   ins_encode %{
 6911     // sraw sign-extends the 32-bit result into the upper 32 bits of dst
 6912     __ sraw(as_Register($dst$$reg),
 6913             as_Register($src1$$reg),
 6914             as_Register($src2$$reg));
 6915   %}
 6916 
 6917   ins_pipe(ialu_reg_reg_vshift);
 6918 %}
 6919 
 6920 // Shift Right Arithmetic Immediate
 6921 instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
 6922   match(Set dst (RShiftI src1 src2));
 6923   ins_cost(ALU_COST);
 6924   format %{ "sraiw  $dst, $src1, ($src2 & 0x1f)\t#@rShiftI_reg_imm" %}
 6925 
 6926   ins_encode %{
 6927     // sraiw sign-extends the 32-bit result into the upper 32 bits of dst
 6928     __ sraiw(as_Register($dst$$reg),
 6929              as_Register($src1$$reg),
 6930              (unsigned) $src2$$constant & 0x1f);
 6931   %}
 6932 
 6933   ins_pipe(ialu_reg_shift);
 6934 %}
 6935 
 6936 // Long Shifts
 6937 
 6938 // Shift Left Register
 6939 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
 6940 instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 6941   match(Set dst (LShiftL src1 src2));
 6942 
 6943   ins_cost(ALU_COST);
 6944   format %{ "sll  $dst, $src1, $src2\t#@lShiftL_reg_reg" %}
 6945 
 6946   ins_encode %{
 6947     __ sll(as_Register($dst$$reg),
 6948            as_Register($src1$$reg),
 6949            as_Register($src2$$reg));
 6950   %}
 6951 
 6952   ins_pipe(ialu_reg_reg_vshift);
 6953 %}
 6954 
 6955 // Shift Left Immediate
 6956 instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 6957   match(Set dst (LShiftL src1 src2));
 6958 
 6959   ins_cost(ALU_COST);
 6960   format %{ "slli  $dst, $src1, ($src2 & 0x3f)\t#@lShiftL_reg_imm" %}
 6961 
 6962   ins_encode %{
 6963     // the shift amount is encoded in the lower
 6964     // 6 bits of the I-immediate field for RV64I
 6965     __ slli(as_Register($dst$$reg),
 6966             as_Register($src1$$reg),
 6967             (unsigned) $src2$$constant & 0x3f);
 6968   %}
 6969 
 6970   ins_pipe(ialu_reg_shift);
 6971 %}
 6972 
 6973 // Shift Right Logical Register
 6974 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
 6975 instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 6976   match(Set dst (URShiftL src1 src2));
 6977 
 6978   ins_cost(ALU_COST);
 6979   format %{ "srl  $dst, $src1, $src2\t#@urShiftL_reg_reg" %}
 6980 
 6981   ins_encode %{
 6982     __ srl(as_Register($dst$$reg),
 6983            as_Register($src1$$reg),
 6984            as_Register($src2$$reg));
 6985   %}
 6986 
 6987   ins_pipe(ialu_reg_reg_vshift);
 6988 %}
 6989 
 6990 // Shift Right Logical Immediate
 6991 instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 6992   match(Set dst (URShiftL src1 src2));
 6993 
 6994   ins_cost(ALU_COST);
 6995   format %{ "srli  $dst, $src1, ($src2 & 0x3f)\t#@urShiftL_reg_imm" %}
 6996 
 6997   ins_encode %{
 6998     // the shift amount is encoded in the lower
 6999     // 6 bits of the I-immediate field for RV64I
 7000     __ srli(as_Register($dst$$reg),
 7001             as_Register($src1$$reg),
 7002             (unsigned) $src2$$constant & 0x3f);
 7003   %}
 7004 
 7005   ins_pipe(ialu_reg_shift);
 7006 %}
 7007 
 7008 // A special-case pattern for card table stores.
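      // (In the card-marking write barrier the card index is computed roughly as
      //  (uintptr_t)addr >> CardTable::card_shift(); matching the CastP2X here folds the
      //  pointer-to-long view of the address directly into the shift.)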
 7009 instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
 7010   match(Set dst (URShiftL (CastP2X src1) src2));
 7011 
 7012   ins_cost(ALU_COST);
 7013   format %{ "srli  $dst, p2x($src1), ($src2 & 0x3f)\t#@urShiftP_reg_imm" %}
 7014 
 7015   ins_encode %{
 7016     // the shift amount is encoded in the lower
 7017     // 6 bits of the I-immediate field for RV64I
 7018     __ srli(as_Register($dst$$reg),
 7019             as_Register($src1$$reg),
 7020             (unsigned) $src2$$constant & 0x3f);
 7021   %}
 7022 
 7023   ins_pipe(ialu_reg_shift);
 7024 %}
 7025 
 7026 // Shift Right Arithmetic Register
 7027 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
 7028 instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
 7029   match(Set dst (RShiftL src1 src2));
 7030 
 7031   ins_cost(ALU_COST);
 7032   format %{ "sra  $dst, $src1, $src2\t#@rShiftL_reg_reg" %}
 7033 
 7034   ins_encode %{
 7035     __ sra(as_Register($dst$$reg),
 7036            as_Register($src1$$reg),
 7037            as_Register($src2$$reg));
 7038   %}
 7039 
 7040   ins_pipe(ialu_reg_reg_vshift);
 7041 %}
 7042 
 7043 // Shift Right Arithmetic Immediate
 7044 instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
 7045   match(Set dst (RShiftL src1 src2));
 7046 
 7047   ins_cost(ALU_COST);
 7048   format %{ "srai  $dst, $src1, ($src2 & 0x3f)\t#@rShiftL_reg_imm" %}
 7049 
 7050   ins_encode %{
 7051     // the shift amount is encoded in the lower
 7052     // 6 bits of the I-immediate field for RV64I
 7053     __ srai(as_Register($dst$$reg),
 7054             as_Register($src1$$reg),
 7055             (unsigned) $src2$$constant & 0x3f);
 7056   %}
 7057 
 7058   ins_pipe(ialu_reg_shift);
 7059 %}
 7060 
 7061 instruct regI_not_reg(iRegINoSp dst, iRegI src1, immI_M1 m1) %{
 7062   match(Set dst (XorI src1 m1));
 7063   ins_cost(ALU_COST);
 7064   format %{ "xori  $dst, $src1, -1\t#@regI_not_reg" %}
 7065 
 7066   ins_encode %{
 7067     __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
 7068   %}
 7069 
 7070   ins_pipe(ialu_reg_imm);
 7071 %}
 7072 
 7073 instruct regL_not_reg(iRegLNoSp dst, iRegL src1, immL_M1 m1) %{
 7074   match(Set dst (XorL src1 m1));
 7075   ins_cost(ALU_COST);
 7076   format %{ "xori  $dst, $src1, -1\t#@regL_not_reg" %}
 7077 
 7078   ins_encode %{
 7079     __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
 7080   %}
 7081 
 7082   ins_pipe(ialu_reg_imm);
 7083 %}
 7084 
 7085 
 7086 // ============================================================================
 7087 // Floating Point Arithmetic Instructions
 7088 
 7089 instruct addF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7090   match(Set dst (AddF src1 src2));
 7091 
 7092   ins_cost(DEFAULT_COST * 5);
 7093   format %{ "fadd.s  $dst, $src1, $src2\t#@addF_reg_reg" %}
 7094 
 7095   ins_encode %{
 7096     __ fadd_s(as_FloatRegister($dst$$reg),
 7097               as_FloatRegister($src1$$reg),
 7098               as_FloatRegister($src2$$reg));
 7099   %}
 7100 
 7101   ins_pipe(fp_dop_reg_reg_s);
 7102 %}
 7103 
 7104 instruct addD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7105   match(Set dst (AddD src1 src2));
 7106 
 7107   ins_cost(DEFAULT_COST * 5);
 7108   format %{ "fadd.d  $dst, $src1, $src2\t#@addD_reg_reg" %}
 7109 
 7110   ins_encode %{
 7111     __ fadd_d(as_FloatRegister($dst$$reg),
 7112               as_FloatRegister($src1$$reg),
 7113               as_FloatRegister($src2$$reg));
 7114   %}
 7115 
 7116   ins_pipe(fp_dop_reg_reg_d);
 7117 %}
 7118 
 7119 instruct subF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7120   match(Set dst (SubF src1 src2));
 7121 
 7122   ins_cost(DEFAULT_COST * 5);
 7123   format %{ "fsub.s  $dst, $src1, $src2\t#@subF_reg_reg" %}
 7124 
 7125   ins_encode %{
 7126     __ fsub_s(as_FloatRegister($dst$$reg),
 7127               as_FloatRegister($src1$$reg),
 7128               as_FloatRegister($src2$$reg));
 7129   %}
 7130 
 7131   ins_pipe(fp_dop_reg_reg_s);
 7132 %}
 7133 
 7134 instruct subD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7135   match(Set dst (SubD src1 src2));
 7136 
 7137   ins_cost(DEFAULT_COST * 5);
 7138   format %{ "fsub.d  $dst, $src1, $src2\t#@subD_reg_reg" %}
 7139 
 7140   ins_encode %{
 7141     __ fsub_d(as_FloatRegister($dst$$reg),
 7142               as_FloatRegister($src1$$reg),
 7143               as_FloatRegister($src2$$reg));
 7144   %}
 7145 
 7146   ins_pipe(fp_dop_reg_reg_d);
 7147 %}
 7148 
 7149 instruct mulF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7150   match(Set dst (MulF src1 src2));
 7151 
 7152   ins_cost(FMUL_SINGLE_COST);
 7153   format %{ "fmul.s  $dst, $src1, $src2\t#@mulF_reg_reg" %}
 7154 
 7155   ins_encode %{
 7156     __ fmul_s(as_FloatRegister($dst$$reg),
 7157               as_FloatRegister($src1$$reg),
 7158               as_FloatRegister($src2$$reg));
 7159   %}
 7160 
 7161   ins_pipe(fp_dop_reg_reg_s);
 7162 %}
 7163 
 7164 instruct mulD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7165   match(Set dst (MulD src1 src2));
 7166 
 7167   ins_cost(FMUL_DOUBLE_COST);
 7168   format %{ "fmul.d  $dst, $src1, $src2\t#@mulD_reg_reg" %}
 7169 
 7170   ins_encode %{
 7171     __ fmul_d(as_FloatRegister($dst$$reg),
 7172               as_FloatRegister($src1$$reg),
 7173               as_FloatRegister($src2$$reg));
 7174   %}
 7175 
 7176   ins_pipe(fp_dop_reg_reg_d);
 7177 %}
 7178 
 7179 // src1 * src2 + src3
 7180 instruct maddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7181   match(Set dst (FmaF src3 (Binary src1 src2)));
 7182 
 7183   ins_cost(FMUL_SINGLE_COST);
 7184   format %{ "fmadd.s  $dst, $src1, $src2, $src3\t#@maddF_reg_reg" %}
 7185 
 7186   ins_encode %{
 7187     assert(UseFMA, "Needs FMA instructions support.");
 7188     __ fmadd_s(as_FloatRegister($dst$$reg),
 7189                as_FloatRegister($src1$$reg),
 7190                as_FloatRegister($src2$$reg),
 7191                as_FloatRegister($src3$$reg));
 7192   %}
 7193 
 7194   ins_pipe(pipe_class_default);
 7195 %}
 7196 
 7197 // src1 * src2 + src3
 7198 instruct maddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7199   match(Set dst (FmaD src3 (Binary src1 src2)));
 7200 
 7201   ins_cost(FMUL_DOUBLE_COST);
 7202   format %{ "fmadd.d  $dst, $src1, $src2, $src3\t#@maddD_reg_reg" %}
 7203 
 7204   ins_encode %{
 7205     assert(UseFMA, "Needs FMA instructions support.");
 7206     __ fmadd_d(as_FloatRegister($dst$$reg),
 7207                as_FloatRegister($src1$$reg),
 7208                as_FloatRegister($src2$$reg),
 7209                as_FloatRegister($src3$$reg));
 7210   %}
 7211 
 7212   ins_pipe(pipe_class_default);
 7213 %}
 7214 
 7215 // src1 * src2 - src3
 7216 instruct msubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7217   match(Set dst (FmaF (NegF src3) (Binary src1 src2)));
 7218 
 7219   ins_cost(FMUL_SINGLE_COST);
 7220   format %{ "fmsub.s  $dst, $src1, $src2, $src3\t#@msubF_reg_reg" %}
 7221 
 7222   ins_encode %{
 7223     assert(UseFMA, "Needs FMA instructions support.");
 7224     __ fmsub_s(as_FloatRegister($dst$$reg),
 7225                as_FloatRegister($src1$$reg),
 7226                as_FloatRegister($src2$$reg),
 7227                as_FloatRegister($src3$$reg));
 7228   %}
 7229 
 7230   ins_pipe(pipe_class_default);
 7231 %}
 7232 
 7233 // src1 * src2 - src3
 7234 instruct msubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7235   match(Set dst (FmaD (NegD src3) (Binary src1 src2)));
 7236 
 7237   ins_cost(FMUL_DOUBLE_COST);
 7238   format %{ "fmsub.d  $dst, $src1, $src2, $src3\t#@msubD_reg_reg" %}
 7239 
 7240   ins_encode %{
 7241     assert(UseFMA, "Needs FMA instructions support.");
 7242     __ fmsub_d(as_FloatRegister($dst$$reg),
 7243                as_FloatRegister($src1$$reg),
 7244                as_FloatRegister($src2$$reg),
 7245                as_FloatRegister($src3$$reg));
 7246   %}
 7247 
 7248   ins_pipe(pipe_class_default);
 7249 %}
 7250 
 7251 // src1 * (-src2) + src3
 7252 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
 7253 instruct nmsubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7254   match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
 7255 
 7256   ins_cost(FMUL_SINGLE_COST);
 7257   format %{ "fnmsub.s  $dst, $src1, $src2, $src3\t#@nmsubF_reg_reg" %}
 7258 
 7259   ins_encode %{
 7260     assert(UseFMA, "Needs FMA instructions support.");
 7261     __ fnmsub_s(as_FloatRegister($dst$$reg),
 7262                 as_FloatRegister($src1$$reg),
 7263                 as_FloatRegister($src2$$reg),
 7264                 as_FloatRegister($src3$$reg));
 7265   %}
 7266 
 7267   ins_pipe(pipe_class_default);
 7268 %}
 7269 
 7270 // src1 * (-src2) + src3
 7271 // "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
 7272 instruct nmsubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7273   match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
 7274 
 7275   ins_cost(FMUL_DOUBLE_COST);
 7276   format %{ "fnmsub.d  $dst, $src1, $src2, $src3\t#@nmsubD_reg_reg" %}
 7277 
 7278   ins_encode %{
 7279     assert(UseFMA, "Needs FMA instructions support.");
 7280     __ fnmsub_d(as_FloatRegister($dst$$reg),
 7281                 as_FloatRegister($src1$$reg),
 7282                 as_FloatRegister($src2$$reg),
 7283                 as_FloatRegister($src3$$reg));
 7284   %}
 7285 
 7286   ins_pipe(pipe_class_default);
 7287 %}
 7288 
 7289 // src1 * (-src2) - src3
 7290 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 7291 instruct nmaddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
 7292   match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
 7293 
 7294   ins_cost(FMUL_SINGLE_COST);
 7295   format %{ "fnmadd.s  $dst, $src1, $src2, $src3\t#@nmaddF_reg_reg" %}
 7296 
 7297   ins_encode %{
 7298     assert(UseFMA, "Needs FMA instructions support.");
 7299     __ fnmadd_s(as_FloatRegister($dst$$reg),
 7300                 as_FloatRegister($src1$$reg),
 7301                 as_FloatRegister($src2$$reg),
 7302                 as_FloatRegister($src3$$reg));
 7303   %}
 7304 
 7305   ins_pipe(pipe_class_default);
 7306 %}
 7307 
 7308 // src1 * (-src2) - src3
 7309 // "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
 7310 instruct nmaddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
 7311   match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
 7312 
 7313   ins_cost(FMUL_DOUBLE_COST);
 7314   format %{ "fnmadd.d  $dst, $src1, $src2, $src3\t#@nmaddD_reg_reg" %}
 7315 
 7316   ins_encode %{
 7317     assert(UseFMA, "Needs FMA instructions support.");
 7318     __ fnmadd_d(as_FloatRegister($dst$$reg),
 7319                 as_FloatRegister($src1$$reg),
 7320                 as_FloatRegister($src2$$reg),
 7321                 as_FloatRegister($src3$$reg));
 7322   %}
 7323 
 7324   ins_pipe(pipe_class_default);
 7325 %}
 7326 
 7327 // Math.max(FF)F
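      // Without Zfa, fmin.s/fmax.s return the non-NaN operand when only one input is NaN, which does
      // not match Java's Math.max/min (a NaN input must produce NaN), so the multi-instruction
      // minmax_fp sequence is used. The Zfa fminm/fmaxm instructions (IEEE 754-2019 minimum/maximum)
      // propagate NaNs and can be emitted directly, as in the *_zfa rules.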
 7328 instruct maxF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
 7329   predicate(!UseZfa);
 7330   match(Set dst (MaxF src1 src2));
 7331   effect(KILL cr);
 7332 
 7333   format %{ "maxF $dst, $src1, $src2" %}
 7334 
 7335   ins_encode %{
 7336     __ minmax_fp(as_FloatRegister($dst$$reg),
 7337                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7338                  __ FLOAT_TYPE::single_precision, false /* is_min */);
 7339   %}
 7340 
 7341   ins_pipe(pipe_class_default);
 7342 %}
 7343 
 7344 instruct maxF_reg_reg_zfa(fRegF dst, fRegF src1, fRegF src2) %{
 7345   predicate(UseZfa);
 7346   match(Set dst (MaxF src1 src2));
 7347 
 7348   format %{ "maxF $dst, $src1, $src2" %}
 7349 
 7350   ins_encode %{
 7351     __ fmaxm_s(as_FloatRegister($dst$$reg),
 7352                as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
 7353   %}
 7354 
 7355   ins_pipe(pipe_class_default);
 7356 %}
 7357 
 7358 // Math.min(FF)F
 7359 instruct minF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
 7360   predicate(!UseZfa);
 7361   match(Set dst (MinF src1 src2));
 7362   effect(KILL cr);
 7363 
 7364   format %{ "minF $dst, $src1, $src2" %}
 7365 
 7366   ins_encode %{
 7367     __ minmax_fp(as_FloatRegister($dst$$reg),
 7368                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7369                  __ FLOAT_TYPE::single_precision, true /* is_min */);
 7370   %}
 7371 
 7372   ins_pipe(pipe_class_default);
 7373 %}
 7374 
 7375 instruct minF_reg_reg_zfa(fRegF dst, fRegF src1, fRegF src2) %{
 7376   predicate(UseZfa);
 7377   match(Set dst (MinF src1 src2));
 7378 
 7379   format %{ "minF $dst, $src1, $src2" %}
 7380 
 7381   ins_encode %{
 7382     __ fminm_s(as_FloatRegister($dst$$reg),
 7383                as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
 7384   %}
 7385 
 7386   ins_pipe(pipe_class_default);
 7387 %}
 7388 
 7389 // Math.max(DD)D
 7390 instruct maxD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
 7391   predicate(!UseZfa);
 7392   match(Set dst (MaxD src1 src2));
 7393   effect(KILL cr);
 7394 
 7395   format %{ "maxD $dst, $src1, $src2" %}
 7396 
 7397   ins_encode %{
 7398     __ minmax_fp(as_FloatRegister($dst$$reg),
 7399                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7400                  __ FLOAT_TYPE::double_precision, false /* is_min */);
 7401   %}
 7402 
 7403   ins_pipe(pipe_class_default);
 7404 %}
 7405 
 7406 instruct maxD_reg_reg_zfa(fRegD dst, fRegD src1, fRegD src2) %{
 7407   predicate(UseZfa);
 7408   match(Set dst (MaxD src1 src2));
 7409 
 7410   format %{ "maxD $dst, $src1, $src2" %}
 7411 
 7412   ins_encode %{
 7413     __ fmaxm_d(as_FloatRegister($dst$$reg),
 7414                as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
 7415   %}
 7416 
 7417   ins_pipe(pipe_class_default);
 7418 %}
 7419 
 7420 // Math.min(DD)D
 7421 instruct minD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
 7422   predicate(!UseZfa);
 7423   match(Set dst (MinD src1 src2));
 7424   effect(KILL cr);
 7425 
 7426   format %{ "minD $dst, $src1, $src2" %}
 7427 
 7428   ins_encode %{
 7429     __ minmax_fp(as_FloatRegister($dst$$reg),
 7430                  as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
 7431                  __ FLOAT_TYPE::double_precision, true /* is_min */);
 7432   %}
 7433 
 7434   ins_pipe(pipe_class_default);
 7435 %}
 7436 
 7437 instruct minD_reg_reg_zfa(fRegD dst, fRegD src1, fRegD src2) %{
 7438   predicate(UseZfa);
 7439   match(Set dst (MinD src1 src2));
 7440 
 7441   format %{ "minD $dst, $src1, $src2" %}
 7442 
 7443   ins_encode %{
 7444     __ fminm_d(as_FloatRegister($dst$$reg),
 7445                as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
 7446   %}
 7447 
 7448   ins_pipe(pipe_class_default);
 7449 %}
 7450 
 7451 // Float.isInfinite
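      // fclass.s/fclass.d set exactly one of ten classification bits for the input; anding with the
      // infinity (or finite) bit mask and testing the result for non-zero yields the boolean answer.
      // The same scheme is used by the isFinite rules below.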
 7452 instruct isInfiniteF_reg_reg(iRegINoSp dst, fRegF src)
 7453 %{
 7454   match(Set dst (IsInfiniteF src));
 7455 
 7456   format %{ "isInfinite $dst, $src" %}
 7457   ins_encode %{
 7458     __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7459     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::inf);
 7460     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7461   %}
 7462 
 7463   ins_pipe(pipe_class_default);
 7464 %}
 7465 
 7466 // Double.isInfinite
 7467 instruct isInfiniteD_reg_reg(iRegINoSp dst, fRegD src)
 7468 %{
 7469   match(Set dst (IsInfiniteD src));
 7470 
 7471   format %{ "isInfinite $dst, $src" %}
 7472   ins_encode %{
 7473     __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7474     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::inf);
 7475     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7476   %}
 7477 
 7478   ins_pipe(pipe_class_default);
 7479 %}
 7480 
 7481 // Float.isFinite
 7482 instruct isFiniteF_reg_reg(iRegINoSp dst, fRegF src)
 7483 %{
 7484   match(Set dst (IsFiniteF src));
 7485 
 7486   format %{ "isFinite $dst, $src" %}
 7487   ins_encode %{
 7488     __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7489     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::finite);
 7490     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7491   %}
 7492 
 7493   ins_pipe(pipe_class_default);
 7494 %}
 7495 
 7496 // Double.isFinite
 7497 instruct isFiniteD_reg_reg(iRegINoSp dst, fRegD src)
 7498 %{
 7499   match(Set dst (IsFiniteD src));
 7500 
 7501   format %{ "isFinite $dst, $src" %}
 7502   ins_encode %{
 7503     __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 7504     __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::finite);
 7505     __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 7506   %}
 7507 
 7508   ins_pipe(pipe_class_default);
 7509 %}
 7510 
 7511 instruct divF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7512   match(Set dst (DivF src1 src2));
 7513 
 7514   ins_cost(FDIV_COST);
 7515   format %{ "fdiv.s  $dst, $src1, $src2\t#@divF_reg_reg" %}
 7516 
 7517   ins_encode %{
 7518     __ fdiv_s(as_FloatRegister($dst$$reg),
 7519               as_FloatRegister($src1$$reg),
 7520               as_FloatRegister($src2$$reg));
 7521   %}
 7522 
 7523   ins_pipe(fp_div_s);
 7524 %}
 7525 
 7526 instruct divD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
 7527   match(Set dst (DivD src1 src2));
 7528 
 7529   ins_cost(FDIV_COST);
 7530   format %{ "fdiv.d  $dst, $src1, $src2\t#@divD_reg_reg" %}
 7531 
 7532   ins_encode %{
 7533     __ fdiv_d(as_FloatRegister($dst$$reg),
 7534               as_FloatRegister($src1$$reg),
 7535               as_FloatRegister($src2$$reg));
 7536   %}
 7537 
 7538   ins_pipe(fp_div_d);
 7539 %}
 7540 
 7541 instruct negF_reg_reg(fRegF dst, fRegF src) %{
 7542   match(Set dst (NegF src));
 7543 
 7544   ins_cost(XFER_COST);
 7545   format %{ "fsgnjn.s  $dst, $src, $src\t#@negF_reg_reg" %}
 7546 
 7547   ins_encode %{
 7548     __ fneg_s(as_FloatRegister($dst$$reg),
 7549               as_FloatRegister($src$$reg));
 7550   %}
 7551 
 7552   ins_pipe(fp_uop_s);
 7553 %}
 7554 
 7555 instruct negD_reg_reg(fRegD dst, fRegD src) %{
 7556   match(Set dst (NegD src));
 7557 
 7558   ins_cost(XFER_COST);
 7559   format %{ "fsgnjn.d  $dst, $src, $src\t#@negD_reg_reg" %}
 7560 
 7561   ins_encode %{
 7562     __ fneg_d(as_FloatRegister($dst$$reg),
 7563               as_FloatRegister($src$$reg));
 7564   %}
 7565 
 7566   ins_pipe(fp_uop_d);
 7567 %}
 7568 
 7569 instruct absI_reg(iRegINoSp dst, iRegIorL2I src) %{
 7570   match(Set dst (AbsI src));
 7571 
 7572   ins_cost(ALU_COST * 3);
 7573   format %{
 7574     "sraiw  t0, $src, 0x1f\n\t"
 7575     "addw  $dst, $src, t0\n\t"
 7576     "xorr  $dst, $dst, t0\t#@absI_reg"
 7577   %}
 7578 
 7579   ins_encode %{
 7580     __ sraiw(t0, as_Register($src$$reg), 0x1f);
 7581     __ addw(as_Register($dst$$reg), as_Register($src$$reg), t0);
 7582     __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
 7583   %}
 7584 
 7585   ins_pipe(pipe_class_default);
 7586 %}
 7587 
 7588 instruct absL_reg(iRegLNoSp dst, iRegL src) %{
 7589   match(Set dst (AbsL src));
 7590 
 7591   ins_cost(ALU_COST * 3);
 7592   format %{
 7593     "srai  t0, $src, 0x3f\n\t"
 7594     "add  $dst, $src, t0\n\t"
 7595     "xorr  $dst, $dst, t0\t#@absL_reg"
 7596   %}
 7597 
 7598   ins_encode %{
 7599     __ srai(t0, as_Register($src$$reg), 0x3f);
 7600     __ add(as_Register($dst$$reg), as_Register($src$$reg), t0);
 7601     __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
 7602   %}
 7603 
 7604   ins_pipe(pipe_class_default);
 7605 %}
 7606 
 7607 instruct absF_reg(fRegF dst, fRegF src) %{
 7608   match(Set dst (AbsF src));
 7609 
 7610   ins_cost(XFER_COST);
 7611   format %{ "fsgnjx.s  $dst, $src, $src\t#@absF_reg" %}
 7612   ins_encode %{
 7613     __ fabs_s(as_FloatRegister($dst$$reg),
 7614               as_FloatRegister($src$$reg));
 7615   %}
 7616 
 7617   ins_pipe(fp_uop_s);
 7618 %}
 7619 
 7620 instruct absD_reg(fRegD dst, fRegD src) %{
 7621   match(Set dst (AbsD src));
 7622 
 7623   ins_cost(XFER_COST);
 7624   format %{ "fsgnjx.d  $dst, $src, $src\t#@absD_reg" %}
 7625   ins_encode %{
 7626     __ fabs_d(as_FloatRegister($dst$$reg),
 7627               as_FloatRegister($src$$reg));
 7628   %}
 7629 
 7630   ins_pipe(fp_uop_d);
 7631 %}
 7632 
 7633 instruct sqrtF_reg(fRegF dst, fRegF src) %{
 7634   match(Set dst (SqrtF src));
 7635 
 7636   ins_cost(FSQRT_COST);
 7637   format %{ "fsqrt.s  $dst, $src\t#@sqrtF_reg" %}
 7638   ins_encode %{
 7639     __ fsqrt_s(as_FloatRegister($dst$$reg),
 7640                as_FloatRegister($src$$reg));
 7641   %}
 7642 
 7643   ins_pipe(fp_sqrt_s);
 7644 %}
 7645 
 7646 instruct sqrtD_reg(fRegD dst, fRegD src) %{
 7647   match(Set dst (SqrtD src));
 7648 
 7649   ins_cost(FSQRT_COST);
 7650   format %{ "fsqrt.d  $dst, $src\t#@sqrtD_reg" %}
 7651   ins_encode %{
 7652     __ fsqrt_d(as_FloatRegister($dst$$reg),
 7653                as_FloatRegister($src$$reg));
 7654   %}
 7655 
 7656   ins_pipe(fp_sqrt_d);
 7657 %}
 7658 
 7659 // Round Instruction
 7660 instruct roundD_reg(fRegD dst, fRegD src, immI rmode, iRegLNoSp tmp1, iRegLNoSp tmp2, iRegLNoSp tmp3) %{
 7661   match(Set dst (RoundDoubleMode src rmode));
 7662   ins_cost(2 * XFER_COST + BRANCH_COST);
 7663   effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3);
 7664 
 7665   format %{ "RoundDoubleMode $src, $rmode" %}
 7666   ins_encode %{
 7667     __ round_double_mode(as_FloatRegister($dst$$reg),
 7668                as_FloatRegister($src$$reg), $rmode$$constant, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
 7669   %}
 7670   ins_pipe(pipe_class_default);
 7671 %}
 7672 
 7673 // Copysign and signum intrinsics
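      // fsgnj.s/fsgnj.d build a value from the magnitude of the first source and the sign bit of the
      // second, which is exactly Math.copySign. The signum rules operate in place on $dst, with the
      // constant 1.0 expected in $one for building the +/-1.0 result.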
 7674 
 7675 instruct copySignD_reg(fRegD dst, fRegD src1, fRegD src2, immD zero) %{
 7676   match(Set dst (CopySignD src1 (Binary src2 zero)));
 7677   format %{ "CopySignD  $dst $src1 $src2" %}
 7678   ins_encode %{
 7679     FloatRegister dst = as_FloatRegister($dst$$reg),
 7680                   src1 = as_FloatRegister($src1$$reg),
 7681                   src2 = as_FloatRegister($src2$$reg);
 7682     __ fsgnj_d(dst, src1, src2);
 7683   %}
 7684   ins_pipe(fp_dop_reg_reg_d);
 7685 %}
 7686 
 7687 instruct copySignF_reg(fRegF dst, fRegF src1, fRegF src2) %{
 7688   match(Set dst (CopySignF src1 src2));
 7689   format %{ "CopySignF  $dst $src1 $src2" %}
 7690   ins_encode %{
 7691     FloatRegister dst = as_FloatRegister($dst$$reg),
 7692                   src1 = as_FloatRegister($src1$$reg),
 7693                   src2 = as_FloatRegister($src2$$reg);
 7694     __ fsgnj_s(dst, src1, src2);
 7695   %}
 7696   ins_pipe(fp_dop_reg_reg_s);
 7697 %}
 7698 
 7699 instruct signumD_reg(fRegD dst, immD zero, fRegD one) %{
 7700   match(Set dst (SignumD dst (Binary zero one)));
 7701   format %{ "signumD  $dst, $dst" %}
 7702   ins_encode %{
 7703     __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), true /* is_double */);
 7704   %}
 7705   ins_pipe(pipe_class_default);
 7706 %}
 7707 
 7708 instruct signumF_reg(fRegF dst, immF zero, fRegF one) %{
 7709   match(Set dst (SignumF dst (Binary zero one)));
 7710   format %{ "signumF  $dst, $dst" %}
 7711   ins_encode %{
 7712     __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), false /* is_double */);
 7713   %}
 7714   ins_pipe(pipe_class_default);
 7715 %}
 7716 
 7717 // Arithmetic Instructions End
 7718 
 7719 // ============================================================================
 7720 // Logical Instructions
 7721 
 7722 // Register And
 7723 instruct andI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7724   match(Set dst (AndI src1 src2));
 7725 
 7726   format %{ "andr  $dst, $src1, $src2\t#@andI_reg_reg" %}
 7727 
 7728   ins_cost(ALU_COST);
 7729   ins_encode %{
 7730     __ andr(as_Register($dst$$reg),
 7731             as_Register($src1$$reg),
 7732             as_Register($src2$$reg));
 7733   %}
 7734 
 7735   ins_pipe(ialu_reg_reg);
 7736 %}
 7737 
 7738 // Immediate And
 7739 instruct andI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7740   match(Set dst (AndI src1 src2));
 7741 
 7742   format %{ "andi  $dst, $src1, $src2\t#@andI_reg_imm" %}
 7743 
 7744   ins_cost(ALU_COST);
 7745   ins_encode %{
 7746     __ andi(as_Register($dst$$reg),
 7747             as_Register($src1$$reg),
 7748             (int32_t)($src2$$constant));
 7749   %}
 7750 
 7751   ins_pipe(ialu_reg_imm);
 7752 %}
 7753 
 7754 // Register Or
 7755 instruct orI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7756   match(Set dst (OrI src1 src2));
 7757 
 7758   format %{ "orr  $dst, $src1, $src2\t#@orI_reg_reg" %}
 7759 
 7760   ins_cost(ALU_COST);
 7761   ins_encode %{
 7762     __ orr(as_Register($dst$$reg),
 7763            as_Register($src1$$reg),
 7764            as_Register($src2$$reg));
 7765   %}
 7766 
 7767   ins_pipe(ialu_reg_reg);
 7768 %}
 7769 
 7770 // Immediate Or
 7771 instruct orI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7772   match(Set dst (OrI src1 src2));
 7773 
 7774   format %{ "ori  $dst, $src1, $src2\t#@orI_reg_imm" %}
 7775 
 7776   ins_cost(ALU_COST);
 7777   ins_encode %{
 7778     __ ori(as_Register($dst$$reg),
 7779            as_Register($src1$$reg),
 7780            (int32_t)($src2$$constant));
 7781   %}
 7782 
 7783   ins_pipe(ialu_reg_imm);
 7784 %}
 7785 
 7786 // Register Xor
 7787 instruct xorI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
 7788   match(Set dst (XorI src1 src2));
 7789 
 7790   format %{ "xorr  $dst, $src1, $src2\t#@xorI_reg_reg" %}
 7791 
 7792   ins_cost(ALU_COST);
 7793   ins_encode %{
 7794     __ xorr(as_Register($dst$$reg),
 7795             as_Register($src1$$reg),
 7796             as_Register($src2$$reg));
 7797   %}
 7798 
 7799   ins_pipe(ialu_reg_reg);
 7800 %}
 7801 
 7802 // Immediate Xor
 7803 instruct xorI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
 7804   match(Set dst (XorI src1 src2));
 7805 
 7806   format %{ "xori  $dst, $src1, $src2\t#@xorI_reg_imm" %}
 7807 
 7808   ins_cost(ALU_COST);
 7809   ins_encode %{
 7810     __ xori(as_Register($dst$$reg),
 7811             as_Register($src1$$reg),
 7812             (int32_t)($src2$$constant));
 7813   %}
 7814 
 7815   ins_pipe(ialu_reg_imm);
 7816 %}
 7817 
 7818 // Register And Long
 7819 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7820   match(Set dst (AndL src1 src2));
 7821 
 7822   format %{ "andr  $dst, $src1, $src2\t#@andL_reg_reg" %}
 7823 
 7824   ins_cost(ALU_COST);
 7825   ins_encode %{
 7826     __ andr(as_Register($dst$$reg),
 7827             as_Register($src1$$reg),
 7828             as_Register($src2$$reg));
 7829   %}
 7830 
 7831   ins_pipe(ialu_reg_reg);
 7832 %}
 7833 
 7834 // Immediate And Long
 7835 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7836   match(Set dst (AndL src1 src2));
 7837 
 7838   format %{ "andi  $dst, $src1, $src2\t#@andL_reg_imm" %}
 7839 
 7840   ins_cost(ALU_COST);
 7841   ins_encode %{
 7842     __ andi(as_Register($dst$$reg),
 7843             as_Register($src1$$reg),
 7844             (int32_t)($src2$$constant));
 7845   %}
 7846 
 7847   ins_pipe(ialu_reg_imm);
 7848 %}
 7849 
 7850 // Register Or Long
 7851 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7852   match(Set dst (OrL src1 src2));
 7853 
 7854   format %{ "orr  $dst, $src1, $src2\t#@orL_reg_reg" %}
 7855 
 7856   ins_cost(ALU_COST);
 7857   ins_encode %{
 7858     __ orr(as_Register($dst$$reg),
 7859            as_Register($src1$$reg),
 7860            as_Register($src2$$reg));
 7861   %}
 7862 
 7863   ins_pipe(ialu_reg_reg);
 7864 %}
 7865 
 7866 // Immediate Or Long
 7867 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7868   match(Set dst (OrL src1 src2));
 7869 
 7870   format %{ "ori  $dst, $src1, $src2\t#@orL_reg_imm" %}
 7871 
 7872   ins_cost(ALU_COST);
 7873   ins_encode %{
 7874     __ ori(as_Register($dst$$reg),
 7875            as_Register($src1$$reg),
 7876            (int32_t)($src2$$constant));
 7877   %}
 7878 
 7879   ins_pipe(ialu_reg_imm);
 7880 %}
 7881 
 7882 // Register Xor Long
 7883 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
 7884   match(Set dst (XorL src1 src2));
 7885 
 7886   format %{ "xorr  $dst, $src1, $src2\t#@xorL_reg_reg" %}
 7887 
 7888   ins_cost(ALU_COST);
 7889   ins_encode %{
 7890     __ xorr(as_Register($dst$$reg),
 7891             as_Register($src1$$reg),
 7892             as_Register($src2$$reg));
 7893   %}
 7894 
 7895   ins_pipe(ialu_reg_reg);
 7896 %}
 7897 
 7898 // Immediate Xor Long
 7899 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
 7900   match(Set dst (XorL src1 src2));
 7901 
 7902   ins_cost(ALU_COST);
 7903   format %{ "xori  $dst, $src1, $src2\t#@xorL_reg_imm" %}
 7904 
 7905   ins_encode %{
 7906     __ xori(as_Register($dst$$reg),
 7907             as_Register($src1$$reg),
 7908             (int32_t)($src2$$constant));
 7909   %}
 7910 
 7911   ins_pipe(ialu_reg_imm);
 7912 %}
 7913 
 7914 // ============================================================================
 7915 // MemBar Instruction
 7916 
 7917 // RVTSO
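      // Under Ztso the hardware already provides total store ordering: only store->load reordering is
      // possible, so acquire/release and store-store barriers need no fence and are elided below; only
      // MemBarVolatile (a StoreLoad barrier) still has to emit one.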
 7918 
 7919 instruct unnecessary_membar_rvtso() %{
 7920   predicate(UseZtso);
 7921   match(LoadFence);
 7922   match(StoreFence);
 7923   match(StoreStoreFence);
 7924   match(MemBarAcquire);
 7925   match(MemBarRelease);
 7926   match(MemBarStoreStore);
 7927   match(MemBarAcquireLock);
 7928   match(MemBarReleaseLock);
 7929 
 7930   ins_cost(0);
 7931 
 7932   size(0);
 7933 
 7934   format %{ "#@unnecessary_membar_rvtso elided/tso (empty encoding)" %}
 7935   ins_encode %{
 7936     __ block_comment("unnecessary_membar_rvtso");
 7937   %}
 7938   ins_pipe(real_empty);
 7939 %}
 7940 
 7941 instruct membar_volatile_rvtso() %{
 7942   predicate(UseZtso);
 7943   match(MemBarVolatile);
 7944   ins_cost(VOLATILE_REF_COST);
 7945 
 7946   format %{ "#@membar_volatile_rvtso\n\t"
 7947             "fence w, r"%}
 7948 
 7949   ins_encode %{
 7950     __ block_comment("membar_volatile_rvtso");
 7951     __ membar(MacroAssembler::StoreLoad);
 7952   %}
 7953 
 7954   ins_pipe(pipe_slow);
 7955 %}
 7956 
 7957 instruct unnecessary_membar_volatile_rvtso() %{
 7958   predicate(UseZtso && Matcher::post_store_load_barrier(n));
 7959   match(MemBarVolatile);
 7960   ins_cost(0);
 7961 
 7962   size(0);
 7963
 7964   format %{ "#@unnecessary_membar_volatile_rvtso (unnecessary so empty encoding)" %}
 7965   ins_encode %{
 7966     __ block_comment("unnecessary_membar_volatile_rvtso");
 7967   %}
 7968   ins_pipe(real_empty);
 7969 %}
 7970 
 7971 // RVWMO
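      // Under the default weak memory model (RVWMO) explicit fences are needed, so the rules below map
      // each barrier to the narrowest fence that provides the required ordering (or elide it where C2
      // can prove it unnecessary).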
 7972 
 7973 instruct membar_acquire_rvwmo() %{
 7974   predicate(!UseZtso);
 7975   match(LoadFence);
 7976   match(MemBarAcquire);
 7977   ins_cost(VOLATILE_REF_COST);
 7978 
 7979   format %{ "#@membar_acquire_rvwmo\n\t"
 7980             "fence r, rw" %}
 7981 
 7982   ins_encode %{
 7983     __ block_comment("membar_acquire_rvwmo");
 7984     __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
 7985   %}
 7986   ins_pipe(pipe_serial);
 7987 %}
 7988 
 7989 instruct membar_release_rvwmo() %{
 7990   predicate(!UseZtso);
 7991   match(StoreFence);
 7992   match(MemBarRelease);
 7993   ins_cost(VOLATILE_REF_COST);
 7994 
 7995   format %{ "#@membar_release_rvwmo\n\t"
 7996             "fence rw, w" %}
 7997 
 7998   ins_encode %{
 7999     __ block_comment("membar_release_rvwmo");
 8000     __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
 8001   %}
 8002   ins_pipe(pipe_serial);
 8003 %}
 8004 
 8005 instruct membar_storestore_rvwmo() %{
 8006   predicate(!UseZtso);
 8007   match(MemBarStoreStore);
 8008   match(StoreStoreFence);
 8009   ins_cost(VOLATILE_REF_COST);
 8010 
 8011   format %{ "#@membar_storestore_rvwmo\n\t"
 8012             "fence w, w" %}
 8013 
 8014   ins_encode %{
 8015     __ membar(MacroAssembler::StoreStore);
 8016   %}
 8017   ins_pipe(pipe_serial);
 8018 %}
 8019 
 8020 instruct membar_volatile_rvwmo() %{
 8021   predicate(!UseZtso);
 8022   match(MemBarVolatile);
 8023   ins_cost(VOLATILE_REF_COST);
 8024 
 8025   format %{ "#@membar_volatile_rvwmo\n\t"
 8026             "fence w, r"%}
 8027 
 8028   ins_encode %{
 8029     __ block_comment("membar_volatile_rvwmo");
 8030     __ membar(MacroAssembler::StoreLoad);
 8031   %}
 8032 
 8033   ins_pipe(pipe_serial);
 8034 %}
 8035 
 8036 instruct membar_lock_rvwmo() %{
 8037   predicate(!UseZtso);
 8038   match(MemBarAcquireLock);
 8039   match(MemBarReleaseLock);
 8040   ins_cost(0);
 8041 
 8042   format %{ "#@membar_lock_rvwmo (elided)" %}
 8043 
 8044   ins_encode %{
 8045     __ block_comment("membar_lock_rvwmo (elided)");
 8046   %}
 8047 
 8048   ins_pipe(pipe_serial);
 8049 %}
 8050 
 8051 instruct unnecessary_membar_volatile_rvwmo() %{
 8052   predicate(!UseZtso && Matcher::post_store_load_barrier(n));
 8053   match(MemBarVolatile);
 8054   ins_cost(0);
 8055 
 8056   size(0);
 8057   format %{ "#@unnecessary_membar_volatile_rvwmo (unnecessary so empty encoding)" %}
 8058   ins_encode %{
 8059     __ block_comment("unnecessary_membar_volatile_rvwmo");
 8060   %}
 8061   ins_pipe(real_empty);
 8062 %}
 8063 
 8064 instruct spin_wait() %{
 8065   predicate(UseZihintpause);
 8066   match(OnSpinWait);
 8067   ins_cost(CACHE_MISS_COST);
 8068 
 8069   format %{ "spin_wait" %}
 8070 
 8071   ins_encode %{
 8072     __ pause();
 8073   %}
 8074 
 8075   ins_pipe(pipe_serial);
 8076 %}
 8077 
 8078 // ============================================================================
 8079 // Cast Instructions (Java-level type cast)
 8080 
 8081 instruct castX2P(iRegPNoSp dst, iRegL src) %{
 8082   match(Set dst (CastX2P src));
 8083 
 8084   ins_cost(ALU_COST);
 8085   format %{ "mv  $dst, $src\t# long -> ptr, #@castX2P" %}
 8086 
 8087   ins_encode %{
 8088     if ($dst$$reg != $src$$reg) {
 8089       __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 8090     }
 8091   %}
 8092 
 8093   ins_pipe(ialu_reg);
 8094 %}
 8095 
 8096 instruct castP2X(iRegLNoSp dst, iRegP src) %{
 8097   match(Set dst (CastP2X src));
 8098 
 8099   ins_cost(ALU_COST);
 8100   format %{ "mv  $dst, $src\t# ptr -> long, #@castP2X" %}
 8101 
 8102   ins_encode %{
 8103     if ($dst$$reg != $src$$reg) {
 8104       __ mv(as_Register($dst$$reg), as_Register($src$$reg));
 8105     }
 8106   %}
 8107 
 8108   ins_pipe(ialu_reg);
 8109 %}
 8110 
 8111 instruct castPP(iRegPNoSp dst)
 8112 %{
 8113   match(Set dst (CastPP dst));
 8114   ins_cost(0);
 8115 
 8116   size(0);
 8117   format %{ "# castPP of $dst, #@castPP" %}
 8118   ins_encode(/* empty encoding */);
 8119   ins_pipe(pipe_class_empty);
 8120 %}
 8121 
 8122 instruct castLL(iRegL dst)
 8123 %{
 8124   match(Set dst (CastLL dst));
 8125 
 8126   size(0);
 8127   format %{ "# castLL of $dst, #@castLL" %}
 8128   ins_encode(/* empty encoding */);
 8129   ins_cost(0);
 8130   ins_pipe(pipe_class_empty);
 8131 %}
 8132 
 8133 instruct castII(iRegI dst)
 8134 %{
 8135   match(Set dst (CastII dst));
 8136 
 8137   size(0);
 8138   format %{ "# castII of $dst, #@castII" %}
 8139   ins_encode(/* empty encoding */);
 8140   ins_cost(0);
 8141   ins_pipe(pipe_class_empty);
 8142 %}
 8143 
 8144 instruct checkCastPP(iRegPNoSp dst)
 8145 %{
 8146   match(Set dst (CheckCastPP dst));
 8147 
 8148   size(0);
 8149   ins_cost(0);
 8150   format %{ "# checkcastPP of $dst, #@checkCastPP" %}
 8151   ins_encode(/* empty encoding */);
 8152   ins_pipe(pipe_class_empty);
 8153 %}
 8154 
 8155 instruct castHH(fRegF dst)
 8156 %{
 8157   match(Set dst (CastHH dst));
 8158 
 8159   size(0);
 8160   format %{ "# castHH of $dst" %}
 8161   ins_encode(/* empty encoding */);
 8162   ins_cost(0);
 8163   ins_pipe(pipe_class_empty);
 8164 %}
 8165 
 8166 instruct castFF(fRegF dst)
 8167 %{
 8168   match(Set dst (CastFF dst));
 8169 
 8170   size(0);
 8171   format %{ "# castFF of $dst" %}
 8172   ins_encode(/* empty encoding */);
 8173   ins_cost(0);
 8174   ins_pipe(pipe_class_empty);
 8175 %}
 8176 
 8177 instruct castDD(fRegD dst)
 8178 %{
 8179   match(Set dst (CastDD dst));
 8180 
 8181   size(0);
 8182   format %{ "# castDD of $dst" %}
 8183   ins_encode(/* empty encoding */);
 8184   ins_cost(0);
 8185   ins_pipe(pipe_class_empty);
 8186 %}
 8187 
 8188 instruct castVV(vReg dst)
 8189 %{
 8190   match(Set dst (CastVV dst));
 8191 
 8192   size(0);
 8193   format %{ "# castVV of $dst" %}
 8194   ins_encode(/* empty encoding */);
 8195   ins_cost(0);
 8196   ins_pipe(pipe_class_empty);
 8197 %}
 8198 
 8199 // ============================================================================
 8200 // Convert Instructions
 8201 
 8202 // int to bool
 8203 instruct convI2Bool(iRegINoSp dst, iRegI src)
 8204 %{
 8205   match(Set dst (Conv2B src));
 8206 
 8207   ins_cost(ALU_COST);
 8208   format %{ "snez  $dst, $src\t#@convI2Bool" %}
 8209 
 8210   ins_encode %{
 8211     __ snez(as_Register($dst$$reg), as_Register($src$$reg));
 8212   %}
 8213 
 8214   ins_pipe(ialu_reg);
 8215 %}
 8216 
 8217 // pointer to bool
 8218 instruct convP2Bool(iRegINoSp dst, iRegP src)
 8219 %{
 8220   match(Set dst (Conv2B src));
 8221 
 8222   ins_cost(ALU_COST);
 8223   format %{ "snez  $dst, $src\t#@convP2Bool" %}
 8224 
 8225   ins_encode %{
 8226     __ snez(as_Register($dst$$reg), as_Register($src$$reg));
 8227   %}
 8228 
 8229   ins_pipe(ialu_reg);
 8230 %}
 8231 
 8232 // int <-> long
 8233 
 8234 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
 8235 %{
 8236   match(Set dst (ConvI2L src));
 8237 
 8238   ins_cost(ALU_COST);
 8239   format %{ "sext  $dst, $src, 32\t#@convI2L_reg_reg" %}
 8240   ins_encode %{
 8241     __ sext(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8242   %}
 8243   ins_pipe(ialu_reg);
 8244 %}
 8245 
 8246 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
 8247   match(Set dst (ConvL2I src));
 8248 
 8249   ins_cost(ALU_COST);
 8250   format %{ "sext  $dst, $src, 32\t#@convL2I_reg" %}
 8251 
 8252   ins_encode %{
 8253     __ sext(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8254   %}
 8255 
 8256   ins_pipe(ialu_reg);
 8257 %}
 8258 
 8259 // int to unsigned long (Zero-extend)
 8260 instruct convI2UL_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
 8261 %{
 8262   match(Set dst (AndL (ConvI2L src) mask));
 8263 
 8264   ins_cost(ALU_COST * 2);
 8265   format %{ "zext $dst, $src, 32\t# i2ul, #@convI2UL_reg_reg" %}
 8266 
 8267   ins_encode %{
 8268     __ zext(as_Register($dst$$reg), as_Register($src$$reg), 32);
 8269   %}
 8270 
 8271   ins_pipe(ialu_reg_shift);
 8272 %}
 8273 
 8274 // float <-> double
 8275 
 8276 instruct convF2D_reg(fRegD dst, fRegF src) %{
 8277   match(Set dst (ConvF2D src));
 8278 
 8279   ins_cost(XFER_COST);
 8280   format %{ "fcvt.d.s  $dst, $src\t#@convF2D_reg" %}
 8281 
 8282   ins_encode %{
 8283     __ fcvt_d_s(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
 8284   %}
 8285 
 8286   ins_pipe(fp_f2d);
 8287 %}
 8288 
 8289 instruct convD2F_reg(fRegF dst, fRegD src) %{
 8290   match(Set dst (ConvD2F src));
 8291 
 8292   ins_cost(XFER_COST);
 8293   format %{ "fcvt.s.d  $dst, $src\t#@convD2F_reg" %}
 8294 
 8295   ins_encode %{
 8296     __ fcvt_s_d(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
 8297   %}
 8298 
 8299   ins_pipe(fp_d2f);
 8300 %}
 8301 
 8302 // single <-> half precision
 8303 
 8304 instruct convHF2F_reg_reg(fRegF dst, iRegINoSp src, iRegINoSp tmp) %{
 8305   match(Set dst (ConvHF2F src));
 8306   effect(TEMP tmp);
 8307   format %{ "fmv.h.x $dst, $src\t# move source from $src to $dst\n\t"
 8308             "fcvt.s.h $dst, $dst\t# convert half to single precision"
 8309   %}
 8310   ins_encode %{
 8311     __ float16_to_float($dst$$FloatRegister, $src$$Register, $tmp$$Register);
 8312   %}
 8313   ins_pipe(pipe_slow);
 8314 %}
 8315 
 8316 instruct convF2HF_reg_reg(iRegINoSp dst, fRegF src, fRegF ftmp, iRegINoSp xtmp) %{
 8317   match(Set dst (ConvF2HF src));
 8318   effect(TEMP_DEF dst, TEMP ftmp, TEMP xtmp);
 8319   format %{ "fcvt.h.s $ftmp, $src\t# convert single precision to half\n\t"
 8320             "fmv.x.h $dst, $ftmp\t# move result from $ftmp to $dst"
 8321   %}
 8322   ins_encode %{
 8323     __ float_to_float16($dst$$Register, $src$$FloatRegister, $ftmp$$FloatRegister, $xtmp$$Register);
 8324   %}
 8325   ins_pipe(pipe_slow);
 8326 %}
 8327 
 8328 // half precision operations
 8329 
 8330 instruct reinterpretS2HF(fRegF dst, iRegI src)
 8331 %{
 8332   match(Set dst (ReinterpretS2HF src));
 8333   format %{ "fmv.h.x $dst, $src" %}
 8334   ins_encode %{
 8335     __ fmv_h_x($dst$$FloatRegister, $src$$Register);
 8336   %}
 8337   ins_pipe(fp_i2f);
 8338 %}
 8339 
 8340 instruct convF2HFAndS2HF(fRegF dst, fRegF src)
 8341 %{
 8342   match(Set dst (ReinterpretS2HF (ConvF2HF src)));
 8343   format %{ "convF2HFAndS2HF $dst, $src" %}
 8344   ins_encode %{
 8345     __ fcvt_h_s($dst$$FloatRegister, $src$$FloatRegister);
 8346   %}
 8347   ins_pipe(fp_uop_s);
 8348 %}
 8349 
 8350 instruct reinterpretHF2S(iRegINoSp dst, fRegF src)
 8351 %{
 8352   match(Set dst (ReinterpretHF2S src));
 8353   format %{ "fmv.x.h $dst, $src" %}
 8354   ins_encode %{
 8355     __ fmv_x_h($dst$$Register, $src$$FloatRegister);
 8356   %}
 8357   ins_pipe(fp_f2i);
 8358 %}
 8359 
 8360 instruct convHF2SAndHF2F(fRegF dst, fRegF src)
 8361 %{
 8362   match(Set dst (ConvHF2F (ReinterpretHF2S src)));
 8363   format %{ "convHF2SAndHF2F $dst, $src" %}
 8364   ins_encode %{
 8365     __ fcvt_s_h($dst$$FloatRegister, $src$$FloatRegister);
 8366   %}
 8367   ins_pipe(fp_uop_s);
 8368 %}
 8369 
 8370 instruct sqrt_HF_reg(fRegF dst, fRegF src)
 8371 %{
 8372   match(Set dst (SqrtHF src));
 8373   format %{ "fsqrt.h $dst, $src" %}
 8374   ins_encode %{
 8375     __ fsqrt_h($dst$$FloatRegister, $src$$FloatRegister);
 8376   %}
 8377   ins_pipe(fp_sqrt_s);
 8378 %}
 8379 
 8380 instruct binOps_HF_reg(fRegF dst, fRegF src1, fRegF src2)
 8381 %{
 8382   match(Set dst (AddHF src1 src2));
 8383   match(Set dst (SubHF src1 src2));
 8384   match(Set dst (MulHF src1 src2));
 8385   match(Set dst (DivHF src1 src2));
 8386   format %{ "binop_hf $dst, $src1, $src2" %}
 8387   ins_encode %{
 8388     int opcode = this->ideal_Opcode();
 8389     switch(opcode) {
 8390       case Op_AddHF: __ fadd_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
 8391       case Op_SubHF: __ fsub_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
 8392       case Op_MulHF: __ fmul_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
 8393       case Op_DivHF: __ fdiv_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
 8394       default: assert(false, "%s is not supported here", NodeClassNames[opcode]); break;
 8395     }
 8396   %}
 8397   ins_pipe(fp_dop_reg_reg_s);
 8398 %}
 8399 
 8400 instruct min_HF_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr)
 8401 %{
 8402   predicate(!UseZfa);
 8403   match(Set dst (MinHF src1 src2));
 8404   effect(KILL cr);
 8405 
 8406   format %{ "min_hf $dst, $src1, $src2" %}
 8407 
 8408   ins_encode %{
 8409     __ minmax_fp($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
 8410                  __ FLOAT_TYPE::half_precision, true /* is_min */);
 8411   %}
 8412   ins_pipe(pipe_class_default);
 8413 %}
 8414 
 8415 instruct min_HF_reg_zfa(fRegF dst, fRegF src1, fRegF src2)
 8416 %{
 8417   predicate(UseZfa);
 8418   match(Set dst (MinHF src1 src2));
 8419 
 8420   format %{ "min_hf $dst, $src1, $src2" %}
 8421 
 8422   ins_encode %{
 8423     __ fminm_h(as_FloatRegister($dst$$reg),
 8424                as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
 8425   %}
 8426 
 8427   ins_pipe(pipe_class_default);
 8428 %}
 8429 
 8430 instruct max_HF_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr)
 8431 %{
 8432   predicate(!UseZfa);
 8433   match(Set dst (MaxHF src1 src2));
 8434   effect(KILL cr);
 8435 
 8436   format %{ "max_hf $dst, $src1, $src2" %}
 8437 
 8438   ins_encode %{
 8439     __ minmax_fp($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
 8440                  __ FLOAT_TYPE::half_precision, false /* is_min */);
 8441   %}
 8442   ins_pipe(pipe_class_default);
 8443 %}
 8444 
 8445 instruct max_HF_reg_zfa(fRegF dst, fRegF src1, fRegF src2)
 8446 %{
 8447   predicate(UseZfa);
 8448   match(Set dst (MaxHF src1 src2));
 8449 
 8450   format %{ "max_hf $dst, $src1, $src2" %}
 8451 
 8452   ins_encode %{
 8453     __ fmaxm_h(as_FloatRegister($dst$$reg),
 8454                as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
 8455   %}
 8456 
 8457   ins_pipe(pipe_class_default);
 8458 %}
 8459 
 8460 instruct fma_HF_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3)
 8461 %{
 8462   match(Set dst (FmaHF src3 (Binary src1 src2)));
 8463   format %{ "fmadd.h $dst, $src1, $src2, $src3\t# $dst = $src1 * $src2 + $src3 fma packedH" %}
 8464   ins_encode %{
 8465     __ fmadd_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
 8466   %}
 8467   ins_pipe(pipe_class_default);
 8468 %}
 8469 
 8470 // float <-> int
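//
// Java semantics for fp -> integral conversions (JLS 5.1.3): NaN converts to 0
// and out-of-range values saturate to the min/max of the target type. The
// RISC-V fcvt.* instructions already saturate, but they convert NaN to the
// maximum value, so the fcvt_*_safe macro-assembler helpers are used in this
// and the following conversion sections to force a zero result for NaN inputs.
// A rough sketch of one possible expansion (illustrative only, not necessarily
// the exact helper implementation):
//
//   fcvt.w.s  dst, src, rtz   // saturating convert; NaN alone would yield INT_MAX
//   feq.s     t0, src, src    // t0 = 1 if src is ordered (not NaN), else 0
//   neg       t0, t0          // all-ones mask if ordered, zero if NaN
//   and       dst, dst, t0    // force dst to 0 for a NaN input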
 8471 
 8472 instruct convF2I_reg_reg(iRegINoSp dst, fRegF src) %{
 8473   match(Set dst (ConvF2I src));
 8474 
 8475   ins_cost(XFER_COST);
 8476   format %{ "fcvt.w.s  $dst, $src\t#@convF2I_reg_reg" %}
 8477 
 8478   ins_encode %{
 8479     __ fcvt_w_s_safe($dst$$Register, $src$$FloatRegister);
 8480   %}
 8481 
 8482   ins_pipe(fp_f2i);
 8483 %}
 8484 
 8485 instruct convI2F_reg_reg(fRegF dst, iRegIorL2I src) %{
 8486   match(Set dst (ConvI2F src));
 8487 
 8488   ins_cost(XFER_COST);
 8489   format %{ "fcvt.s.w  $dst, $src\t#@convI2F_reg_reg" %}
 8490 
 8491   ins_encode %{
 8492     __ fcvt_s_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8493   %}
 8494 
 8495   ins_pipe(fp_i2f);
 8496 %}
 8497 
 8498 // float <-> long
 8499 
 8500 instruct convF2L_reg_reg(iRegLNoSp dst, fRegF src) %{
 8501   match(Set dst (ConvF2L src));
 8502 
 8503   ins_cost(XFER_COST);
 8504   format %{ "fcvt.l.s  $dst, $src\t#@convF2L_reg_reg" %}
 8505 
 8506   ins_encode %{
 8507     __ fcvt_l_s_safe($dst$$Register, $src$$FloatRegister);
 8508   %}
 8509 
 8510   ins_pipe(fp_f2l);
 8511 %}
 8512 
 8513 instruct convL2F_reg_reg(fRegF dst, iRegL src) %{
 8514   match(Set dst (ConvL2F src));
 8515 
 8516   ins_cost(XFER_COST);
 8517   format %{ "fcvt.s.l  $dst, $src\t#@convL2F_reg_reg" %}
 8518 
 8519   ins_encode %{
 8520     __ fcvt_s_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8521   %}
 8522 
 8523   ins_pipe(fp_l2f);
 8524 %}
 8525 
 8526 // double <-> int
 8527 
 8528 instruct convD2I_reg_reg(iRegINoSp dst, fRegD src) %{
 8529   match(Set dst (ConvD2I src));
 8530 
 8531   ins_cost(XFER_COST);
 8532   format %{ "fcvt.w.d  $dst, $src\t#@convD2I_reg_reg" %}
 8533 
 8534   ins_encode %{
 8535     __ fcvt_w_d_safe($dst$$Register, $src$$FloatRegister);
 8536   %}
 8537 
 8538   ins_pipe(fp_d2i);
 8539 %}
 8540 
 8541 instruct convI2D_reg_reg(fRegD dst, iRegIorL2I src) %{
 8542   match(Set dst (ConvI2D src));
 8543 
 8544   ins_cost(XFER_COST);
 8545   format %{ "fcvt.d.w  $dst, $src\t#@convI2D_reg_reg" %}
 8546 
 8547   ins_encode %{
 8548     __ fcvt_d_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8549   %}
 8550 
 8551   ins_pipe(fp_i2d);
 8552 %}
 8553 
 8554 // double <-> long
 8555 
 8556 instruct convD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
 8557   match(Set dst (ConvD2L src));
 8558 
 8559   ins_cost(XFER_COST);
 8560   format %{ "fcvt.l.d  $dst, $src\t#@convD2L_reg_reg" %}
 8561 
 8562   ins_encode %{
 8563     __ fcvt_l_d_safe($dst$$Register, $src$$FloatRegister);
 8564   %}
 8565 
 8566   ins_pipe(fp_d2l);
 8567 %}
 8568 
 8569 instruct convL2D_reg_reg(fRegD dst, iRegL src) %{
 8570   match(Set dst (ConvL2D src));
 8571 
 8572   ins_cost(XFER_COST);
 8573   format %{ "fcvt.d.l  $dst, $src\t#@convL2D_reg_reg" %}
 8574 
 8575   ins_encode %{
 8576     __ fcvt_d_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8577   %}
 8578 
 8579   ins_pipe(fp_l2d);
 8580 %}
 8581 
 8582 // Convert oop into int for vectors alignment masking
 8583 instruct convP2I(iRegINoSp dst, iRegP src) %{
 8584   match(Set dst (ConvL2I (CastP2X src)));
 8585 
 8586   ins_cost(ALU_COST * 2);
 8587   format %{ "zext $dst, $src, 32\t# ptr -> int, #@convP2I" %}
 8588 
 8589   ins_encode %{
 8590     __ zext($dst$$Register, $src$$Register, 32);
 8591   %}
 8592 
 8593   ins_pipe(ialu_reg);
 8594 %}
 8595 
 8596 // Convert compressed oop into int for vectors alignment masking
// in case of 32-bit oops (heap < 4GB).
 8598 instruct convN2I(iRegINoSp dst, iRegN src)
 8599 %{
 8600   predicate(CompressedOops::shift() == 0);
 8601   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8602 
 8603   ins_cost(ALU_COST);
 8604   format %{ "mv  $dst, $src\t# compressed ptr -> int, #@convN2I" %}
 8605 
 8606   ins_encode %{
 8607     __ mv($dst$$Register, $src$$Register);
 8608   %}
 8609 
 8610   ins_pipe(ialu_reg);
 8611 %}
 8612 
 8613 instruct round_double_reg(iRegLNoSp dst, fRegD src, fRegD ftmp) %{
 8614   match(Set dst (RoundD src));
 8615 
 8616   ins_cost(XFER_COST + BRANCH_COST);
 8617   effect(TEMP ftmp);
 8618   format %{ "java_round_double $dst, $src\t#@round_double_reg" %}
 8619 
 8620   ins_encode %{
 8621     __ java_round_double($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
 8622   %}
 8623 
 8624   ins_pipe(pipe_slow);
 8625 %}
 8626 
 8627 instruct round_float_reg(iRegINoSp dst, fRegF src, fRegF ftmp) %{
 8628   match(Set dst (RoundF src));
 8629 
 8630   ins_cost(XFER_COST + BRANCH_COST);
 8631   effect(TEMP ftmp);
 8632   format %{ "java_round_float $dst, $src\t#@round_float_reg" %}
 8633 
 8634   ins_encode %{
 8635     __ java_round_float($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
 8636   %}
 8637 
 8638   ins_pipe(pipe_slow);
 8639 %}
 8640 
 8641 // Convert oop pointer into compressed form
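// With compressed oops a 64-bit oop is stored as a 32-bit value, roughly
// narrow = (oop - heap_base) >> shift (base and/or shift may be zero, in which
// case encode/decode degenerate to a move or a plain shift). The _not_null
// variants may skip the null check that the general encode/decode has to
// perform, since null must stay encoded as 0.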
 8642 instruct encodeHeapOop(iRegNNoSp dst, iRegP src) %{
 8643   predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
 8644   match(Set dst (EncodeP src));
 8645   ins_cost(ALU_COST);
 8646   format %{ "encode_heap_oop  $dst, $src\t#@encodeHeapOop" %}
 8647   ins_encode %{
 8648     Register s = $src$$Register;
 8649     Register d = $dst$$Register;
 8650     __ encode_heap_oop(d, s);
 8651   %}
 8652   ins_pipe(pipe_class_default);
 8653 %}
 8654 
 8655 instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src) %{
 8656   predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
 8657   match(Set dst (EncodeP src));
 8658   ins_cost(ALU_COST);
 8659   format %{ "encode_heap_oop_not_null $dst, $src\t#@encodeHeapOop_not_null" %}
 8660   ins_encode %{
 8661     __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
 8662   %}
 8663   ins_pipe(pipe_class_default);
 8664 %}
 8665 
 8666 instruct decodeHeapOop(iRegPNoSp dst, iRegN src) %{
 8667   predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
 8668             n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
 8669   match(Set dst (DecodeN src));
 8670 
 8671   ins_cost(0);
 8672   format %{ "decode_heap_oop  $dst, $src\t#@decodeHeapOop" %}
 8673   ins_encode %{
 8674     Register s = $src$$Register;
 8675     Register d = $dst$$Register;
 8676     __ decode_heap_oop(d, s);
 8677   %}
 8678   ins_pipe(pipe_class_default);
 8679 %}
 8680 
 8681 instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src) %{
 8682   predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
 8683             n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
 8684   match(Set dst (DecodeN src));
 8685 
 8686   ins_cost(0);
 8687   format %{ "decode_heap_oop_not_null $dst, $src\t#@decodeHeapOop_not_null" %}
 8688   ins_encode %{
 8689     Register s = $src$$Register;
 8690     Register d = $dst$$Register;
 8691     __ decode_heap_oop_not_null(d, s);
 8692   %}
 8693   ins_pipe(pipe_class_default);
 8694 %}
 8695 
 8696 // Convert klass pointer into compressed form.
 8697 instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
 8698   match(Set dst (EncodePKlass src));
 8699 
 8700   ins_cost(ALU_COST);
 8701   format %{ "encode_klass_not_null  $dst, $src\t#@encodeKlass_not_null" %}
 8702 
 8703   ins_encode %{
 8704     Register src_reg = as_Register($src$$reg);
 8705     Register dst_reg = as_Register($dst$$reg);
 8706     __ encode_klass_not_null(dst_reg, src_reg, t0);
 8707   %}
 8708 
  ins_pipe(pipe_class_default);
 8710 %}
 8711 
 8712 instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src, iRegPNoSp tmp) %{
 8713   match(Set dst (DecodeNKlass src));
 8714 
 8715   effect(TEMP tmp);
 8716 
 8717   ins_cost(ALU_COST);
 8718   format %{ "decode_klass_not_null  $dst, $src\t#@decodeKlass_not_null" %}
 8719 
 8720   ins_encode %{
 8721     Register src_reg = as_Register($src$$reg);
 8722     Register dst_reg = as_Register($dst$$reg);
 8723     Register tmp_reg = as_Register($tmp$$reg);
 8724     __ decode_klass_not_null(dst_reg, src_reg, tmp_reg);
 8725   %}
 8726 
  ins_pipe(pipe_class_default);
 8728 %}
 8729 
 8730 // stack <-> reg and reg <-> reg shuffles with no conversion
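//
// These MoveF2I/MoveI2F/MoveD2L/MoveL2D nodes are raw bit-pattern transfers
// (the ideal graph shapes behind Float.floatToRawIntBits/intBitsToFloat and
// Double.doubleToRawLongBits/longBitsToDouble), so no value conversion is done:
// the stack variants are plain 32/64-bit loads and stores, and the register
// variants use the fmv.x.* / fmv.*.x transfer instructions.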
 8731 
 8732 instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
 8733 
 8734   match(Set dst (MoveF2I src));
 8735 
 8736   effect(DEF dst, USE src);
 8737 
 8738   ins_cost(LOAD_COST);
 8739 
 8740   format %{ "lw  $dst, $src\t#@MoveF2I_stack_reg" %}
 8741 
 8742   ins_encode %{
 8743     __ lw(as_Register($dst$$reg), Address(sp, $src$$disp));
 8744   %}
 8745 
 8746   ins_pipe(iload_reg_reg);
 8747 
 8748 %}
 8749 
 8750 instruct MoveI2F_stack_reg(fRegF dst, stackSlotI src) %{
 8751 
 8752   match(Set dst (MoveI2F src));
 8753 
 8754   effect(DEF dst, USE src);
 8755 
 8756   ins_cost(LOAD_COST);
 8757 
 8758   format %{ "flw  $dst, $src\t#@MoveI2F_stack_reg" %}
 8759 
 8760   ins_encode %{
 8761     __ flw(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
 8762   %}
 8763 
 8764   ins_pipe(fp_load_mem_s);
 8765 
 8766 %}
 8767 
 8768 instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
 8769 
 8770   match(Set dst (MoveD2L src));
 8771 
 8772   effect(DEF dst, USE src);
 8773 
 8774   ins_cost(LOAD_COST);
 8775 
 8776   format %{ "ld  $dst, $src\t#@MoveD2L_stack_reg" %}
 8777 
 8778   ins_encode %{
 8779     __ ld(as_Register($dst$$reg), Address(sp, $src$$disp));
 8780   %}
 8781 
 8782   ins_pipe(iload_reg_reg);
 8783 
 8784 %}
 8785 
 8786 instruct MoveL2D_stack_reg(fRegD dst, stackSlotL src) %{
 8787 
 8788   match(Set dst (MoveL2D src));
 8789 
 8790   effect(DEF dst, USE src);
 8791 
 8792   ins_cost(LOAD_COST);
 8793 
 8794   format %{ "fld  $dst, $src\t#@MoveL2D_stack_reg" %}
 8795 
 8796   ins_encode %{
 8797     __ fld(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
 8798   %}
 8799 
 8800   ins_pipe(fp_load_mem_d);
 8801 
 8802 %}
 8803 
 8804 instruct MoveF2I_reg_stack(stackSlotI dst, fRegF src) %{
 8805 
 8806   match(Set dst (MoveF2I src));
 8807 
 8808   effect(DEF dst, USE src);
 8809 
 8810   ins_cost(STORE_COST);
 8811 
 8812   format %{ "fsw  $src, $dst\t#@MoveF2I_reg_stack" %}
 8813 
 8814   ins_encode %{
 8815     __ fsw(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
 8816   %}
 8817 
 8818   ins_pipe(fp_store_reg_s);
 8819 
 8820 %}
 8821 
 8822 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
 8823 
 8824   match(Set dst (MoveI2F src));
 8825 
 8826   effect(DEF dst, USE src);
 8827 
 8828   ins_cost(STORE_COST);
 8829 
 8830   format %{ "sw  $src, $dst\t#@MoveI2F_reg_stack" %}
 8831 
 8832   ins_encode %{
 8833     __ sw(as_Register($src$$reg), Address(sp, $dst$$disp));
 8834   %}
 8835 
 8836   ins_pipe(istore_reg_reg);
 8837 
 8838 %}
 8839 
 8840 instruct MoveD2L_reg_stack(stackSlotL dst, fRegD src) %{
 8841 
 8842   match(Set dst (MoveD2L src));
 8843 
 8844   effect(DEF dst, USE src);
 8845 
 8846   ins_cost(STORE_COST);
 8847 
 8848   format %{ "fsd  $dst, $src\t#@MoveD2L_reg_stack" %}
 8849 
 8850   ins_encode %{
 8851     __ fsd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
 8852   %}
 8853 
 8854   ins_pipe(fp_store_reg_d);
 8855 
 8856 %}
 8857 
 8858 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
 8859 
 8860   match(Set dst (MoveL2D src));
 8861 
 8862   effect(DEF dst, USE src);
 8863 
 8864   ins_cost(STORE_COST);
 8865 
 8866   format %{ "sd  $src, $dst\t#@MoveL2D_reg_stack" %}
 8867 
 8868   ins_encode %{
 8869     __ sd(as_Register($src$$reg), Address(sp, $dst$$disp));
 8870   %}
 8871 
 8872   ins_pipe(istore_reg_reg);
 8873 
 8874 %}
 8875 
 8876 instruct MoveF2I_reg_reg(iRegINoSp dst, fRegF src) %{
 8877 
 8878   match(Set dst (MoveF2I src));
 8879 
 8880   effect(DEF dst, USE src);
 8881 
 8882   ins_cost(FMVX_COST);
 8883 
 8884   format %{ "fmv.x.w  $dst, $src\t#@MoveF2I_reg_reg" %}
 8885 
 8886   ins_encode %{
 8887     __ fmv_x_w(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 8888   %}
 8889 
 8890   ins_pipe(fp_f2i);
 8891 
 8892 %}
 8893 
 8894 instruct MoveI2F_reg_reg(fRegF dst, iRegI src) %{
 8895 
 8896   match(Set dst (MoveI2F src));
 8897 
 8898   effect(DEF dst, USE src);
 8899 
 8900   ins_cost(FMVX_COST);
 8901 
 8902   format %{ "fmv.w.x  $dst, $src\t#@MoveI2F_reg_reg" %}
 8903 
 8904   ins_encode %{
 8905     __ fmv_w_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8906   %}
 8907 
 8908   ins_pipe(fp_i2f);
 8909 
 8910 %}
 8911 
 8912 instruct MoveD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
 8913 
 8914   match(Set dst (MoveD2L src));
 8915 
 8916   effect(DEF dst, USE src);
 8917 
 8918   ins_cost(FMVX_COST);
 8919 
 8920   format %{ "fmv.x.d $dst, $src\t#@MoveD2L_reg_reg" %}
 8921 
 8922   ins_encode %{
 8923     __ fmv_x_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
 8924   %}
 8925 
 8926   ins_pipe(fp_d2l);
 8927 
 8928 %}
 8929 
 8930 instruct MoveL2D_reg_reg(fRegD dst, iRegL src) %{
 8931 
 8932   match(Set dst (MoveL2D src));
 8933 
 8934   effect(DEF dst, USE src);
 8935 
 8936   ins_cost(FMVX_COST);
 8937 
 8938   format %{ "fmv.d.x  $dst, $src\t#@MoveL2D_reg_reg" %}
 8939 
 8940   ins_encode %{
 8941     __ fmv_d_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
 8942   %}
 8943 
 8944   ins_pipe(fp_l2d);
 8945 
 8946 %}
 8947 
 8948 // ============================================================================
// Compare instructions which set the result of the comparison in the destination register.
 8950 
 8951 instruct cmpF3_reg_reg(iRegINoSp dst, fRegF op1, fRegF op2)
 8952 %{
 8953   match(Set dst (CmpF3 op1 op2));
 8954 
 8955   ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
 8956   format %{ "flt.s  $dst, $op2, $op1\t#@cmpF3_reg_reg\n\t"
 8957             "bgtz   $dst, done\n\t"
 8958             "feq.s  $dst, $op1, $op2\n\t"
 8959             "addi   $dst, $dst, -1\n\t"
 8960             "done:"
 8961   %}
 8962 
 8963   ins_encode %{
 8964     // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
 8965     __ float_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg),
 8966                      as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
 8967   %}
 8968 
 8969   ins_pipe(pipe_class_default);
 8970 %}
 8971 
 8972 instruct cmpD3_reg_reg(iRegINoSp dst, fRegD op1, fRegD op2)
 8973 %{
 8974   match(Set dst (CmpD3 op1 op2));
 8975 
 8976   ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
 8977   format %{ "flt.d  $dst, $op2, $op1\t#@cmpD3_reg_reg\n\t"
 8978             "bgtz   $dst, done\n\t"
 8979             "feq.d  $dst, $op1, $op2\n\t"
 8980             "addi   $dst, $dst, -1\n\t"
 8981             "done:"
 8982   %}
 8983 
 8984   ins_encode %{
 8985     // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
 8986     __ double_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
 8987   %}
 8988 
 8989   ins_pipe(pipe_class_default);
 8990 %}
 8991 
 8992 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
 8993 %{
 8994   match(Set dst (CmpL3 op1 op2));
 8995 
 8996   ins_cost(ALU_COST * 3 + BRANCH_COST);
 8997   format %{ "slt   $dst, $op2, $op1\t#@cmpL3_reg_reg\n\t"
 8998             "bnez  $dst, done\n\t"
 8999             "slt   $dst, $op1, $op2\n\t"
 9000             "neg   $dst, $dst\n\t"
 9001             "done:"
 9002   %}
 9003   ins_encode %{
 9004     __ cmp_l2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 9005     __ mv(as_Register($dst$$reg), t0);
 9006   %}
 9007 
 9008   ins_pipe(pipe_class_default);
 9009 %}
 9010 
 9011 instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
 9012 %{
 9013   match(Set dst (CmpUL3 op1 op2));
 9014 
 9015   ins_cost(ALU_COST * 3 + BRANCH_COST);
 9016   format %{ "sltu  $dst, $op2, $op1\t#@cmpUL3_reg_reg\n\t"
 9017             "bnez  $dst, done\n\t"
 9018             "sltu  $dst, $op1, $op2\n\t"
 9019             "neg   $dst, $dst\n\t"
 9020             "done:"
 9021   %}
 9022   ins_encode %{
 9023     __ cmp_ul2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 9024     __ mv(as_Register($dst$$reg), t0);
 9025   %}
 9026 
 9027   ins_pipe(pipe_class_default);
 9028 %}
 9029 
 9030 instruct cmpU3_reg_reg(iRegINoSp dst, iRegI op1, iRegI op2)
 9031 %{
 9032   match(Set dst (CmpU3 op1 op2));
 9033 
 9034   ins_cost(ALU_COST * 3 + BRANCH_COST);
 9035   format %{ "sltu  $dst, $op2, $op1\t#@cmpU3_reg_reg\n\t"
 9036             "bnez  $dst, done\n\t"
 9037             "sltu  $dst, $op1, $op2\n\t"
 9038             "neg   $dst, $dst\n\t"
 9039             "done:"
 9040   %}
 9041   ins_encode %{
 9042     __ cmp_uw2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
 9043     __ mv(as_Register($dst$$reg), t0);
 9044   %}
 9045 
 9046   ins_pipe(pipe_class_default);
 9047 %}
 9048 
 9049 instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q)
 9050 %{
 9051   match(Set dst (CmpLTMask p q));
 9052 
 9053   ins_cost(2 * ALU_COST);
 9054 
 9055   format %{ "slt $dst, $p, $q\t#@cmpLTMask_reg_reg\n\t"
 9056             "subw $dst, zr, $dst\t#@cmpLTMask_reg_reg"
 9057   %}
 9058 
 9059   ins_encode %{
 9060     __ slt(as_Register($dst$$reg), as_Register($p$$reg), as_Register($q$$reg));
 9061     __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
 9062   %}
 9063 
 9064   ins_pipe(ialu_reg_reg);
 9065 %}
 9066 
 9067 instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I op, immI0 zero)
 9068 %{
 9069   match(Set dst (CmpLTMask op zero));
 9070 
 9071   ins_cost(ALU_COST);
 9072 
 9073   format %{ "sraiw $dst, $dst, 31\t#@cmpLTMask_reg_reg" %}
 9074 
 9075   ins_encode %{
 9076     __ sraiw(as_Register($dst$$reg), as_Register($op$$reg), 31);
 9077   %}
 9078 
 9079   ins_pipe(ialu_reg_shift);
 9080 %}
 9081 
 9082 
 9083 // ============================================================================
 9084 // Max and Min
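//
// Reading the uses below, cmov_gt(cmp1, cmp2, dst, src) is taken to mean
// "if (cmp1 > cmp2) dst = src" (and cmov_lt the analogue for <), so
// minI_reg_reg keeps $dst unless $dst > $src, and the three-operand forms
// first copy $src1 into $dst and then conditionally overwrite it with $src2.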
 9085 
 9086 instruct minI_reg_reg(iRegINoSp dst, iRegI src)
 9087 %{
 9088   match(Set dst (MinI dst src));
 9089 
 9090   ins_cost(BRANCH_COST + ALU_COST);
 9091   format %{"minI_reg_reg $dst, $dst, $src\t#@minI_reg_reg\n\t"%}
 9092 
 9093   ins_encode %{
 9094     __ cmov_gt(as_Register($dst$$reg), as_Register($src$$reg),
 9095                as_Register($dst$$reg), as_Register($src$$reg));
 9096   %}
 9097 
 9098   ins_pipe(pipe_class_compare);
 9099 %}
 9100 
 9101 instruct maxI_reg_reg(iRegINoSp dst, iRegI src)
 9102 %{
 9103   match(Set dst (MaxI dst src));
 9104 
 9105   ins_cost(BRANCH_COST + ALU_COST);
 9106   format %{"maxI_reg_reg $dst, $dst, $src\t#@maxI_reg_reg\n\t"%}
 9107 
 9108   ins_encode %{
 9109     __ cmov_lt(as_Register($dst$$reg), as_Register($src$$reg),
 9110                as_Register($dst$$reg), as_Register($src$$reg));
 9111   %}
 9112 
 9113   ins_pipe(pipe_class_compare);
 9114 %}
 9115 
 9116 // special case for comparing with zero
 9117 // n.b. this is selected in preference to the rule above because it
 9118 // avoids loading constant 0 into a source register
 9119 
 9120 instruct minI_reg_zero(iRegINoSp dst, immI0 zero)
 9121 %{
 9122   match(Set dst (MinI dst zero));
 9123   match(Set dst (MinI zero dst));
 9124 
 9125   ins_cost(BRANCH_COST + ALU_COST);
 9126   format %{"minI_reg_zero $dst, $dst, zr\t#@minI_reg_zero\n\t"%}
 9127 
 9128   ins_encode %{
 9129     __ cmov_gt(as_Register($dst$$reg), zr,
 9130                as_Register($dst$$reg), zr);
 9131   %}
 9132 
 9133   ins_pipe(pipe_class_compare);
 9134 %}
 9135 
 9136 instruct maxI_reg_zero(iRegINoSp dst, immI0 zero)
 9137 %{
 9138   match(Set dst (MaxI dst zero));
 9139   match(Set dst (MaxI zero dst));
 9140 
 9141   ins_cost(BRANCH_COST + ALU_COST);
 9142   format %{"maxI_reg_zero $dst, $dst, zr\t#@maxI_reg_zero\n\t"%}
 9143 
 9144   ins_encode %{
 9145     __ cmov_lt(as_Register($dst$$reg), zr,
 9146                as_Register($dst$$reg), zr);
 9147   %}
 9148 
 9149   ins_pipe(pipe_class_compare);
 9150 %}
 9151 
 9152 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
 9153 %{
 9154   match(Set dst (MinI src1 src2));
 9155 
 9156   effect(DEF dst, USE src1, USE src2);
 9157 
 9158   ins_cost(BRANCH_COST + ALU_COST * 2);
 9159   format %{"minI_rReg $dst, $src1, $src2\t#@minI_rReg\n\t"%}
 9160 
 9161   ins_encode %{
 9162     __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
 9163     __ cmov_gt(as_Register($src1$$reg), as_Register($src2$$reg),
 9164                as_Register($dst$$reg), as_Register($src2$$reg));
 9165   %}
 9166 
 9167   ins_pipe(pipe_class_compare);
 9168 %}
 9169 
 9170 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
 9171 %{
 9172   match(Set dst (MaxI src1 src2));
 9173 
 9174   effect(DEF dst, USE src1, USE src2);
 9175 
 9176   ins_cost(BRANCH_COST + ALU_COST * 2);
 9177   format %{"maxI_rReg $dst, $src1, $src2\t#@maxI_rReg\n\t"%}
 9178 
 9179   ins_encode %{
 9180     __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
 9181     __ cmov_lt(as_Register($src1$$reg), as_Register($src2$$reg),
 9182                as_Register($dst$$reg), as_Register($src2$$reg));
 9183   %}
 9184 
 9185   ins_pipe(pipe_class_compare);
 9186 %}
 9187 
 9188 // ============================================================================
 9189 // Branch Instructions
 9190 // Direct Branch.
 9191 instruct branch(label lbl)
 9192 %{
 9193   match(Goto);
 9194 
 9195   effect(USE lbl);
 9196 
 9197   ins_cost(BRANCH_COST);
 9198   format %{ "j  $lbl\t#@branch" %}
 9199 
 9200   ins_encode(riscv_enc_j(lbl));
 9201 
 9202   ins_pipe(pipe_branch);
 9203 %}
 9204 
 9205 // ============================================================================
 9206 // Compare and Branch Instructions
 9207 
// Patterns for short (near) variants, where the branch target is within the
// reach of a single conditional branch (12-bit immediate, about +/-4KiB)
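// These rules are marked with ins_short_branch(1) and share their match rules
// with the far variants further down; the compiler is expected to substitute a
// short form for the corresponding far form when the final branch displacement
// is known to fit the conditional-branch immediate.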
 9209 
 9210 // Compare flags and branch near instructions.
 9211 instruct cmpFlag_branch(cmpOpEqNe cmp, rFlagsReg cr, label lbl) %{
 9212   match(If cmp cr);
 9213   effect(USE lbl);
 9214 
 9215   ins_cost(BRANCH_COST);
 9216   format %{ "b$cmp  $cr, zr, $lbl\t#@cmpFlag_branch" %}
 9217 
 9218   ins_encode %{
 9219     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label));
 9220   %}
 9221   ins_pipe(pipe_cmpz_branch);
 9222   ins_short_branch(1);
 9223 %}
 9224 
 9225 // Compare signed int and branch near instructions
 9226 instruct cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
 9227 %{
 9228   // Same match rule as `far_cmpI_branch'.
 9229   match(If cmp (CmpI op1 op2));
 9230 
 9231   effect(USE lbl);
 9232 
 9233   ins_cost(BRANCH_COST);
 9234 
 9235   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpI_branch" %}
 9236 
 9237   ins_encode %{
 9238     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9239   %}
 9240 
 9241   ins_pipe(pipe_cmp_branch);
 9242   ins_short_branch(1);
 9243 %}
 9244 
 9245 instruct cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
 9246 %{
 9247   // Same match rule as `far_cmpI_loop'.
 9248   match(CountedLoopEnd cmp (CmpI op1 op2));
 9249 
 9250   effect(USE lbl);
 9251 
 9252   ins_cost(BRANCH_COST);
 9253 
 9254   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpI_loop" %}
 9255 
 9256   ins_encode %{
 9257     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9258   %}
 9259 
 9260   ins_pipe(pipe_cmp_branch);
 9261   ins_short_branch(1);
 9262 %}
 9263 
 9264 // Compare unsigned int and branch near instructions
 9265 instruct cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl)
 9266 %{
 9267   // Same match rule as `far_cmpU_branch'.
 9268   match(If cmp (CmpU op1 op2));
 9269 
 9270   effect(USE lbl);
 9271 
 9272   ins_cost(BRANCH_COST);
 9273 
 9274   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpU_branch" %}
 9275 
 9276   ins_encode %{
 9277     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9278                   as_Register($op2$$reg), *($lbl$$label));
 9279   %}
 9280 
 9281   ins_pipe(pipe_cmp_branch);
 9282   ins_short_branch(1);
 9283 %}
 9284 
 9285 // Compare signed long and branch near instructions
 9286 instruct cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
 9287 %{
 9288   // Same match rule as `far_cmpL_branch'.
 9289   match(If cmp (CmpL op1 op2));
 9290 
 9291   effect(USE lbl);
 9292 
 9293   ins_cost(BRANCH_COST);
 9294 
 9295   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpL_branch" %}
 9296 
 9297   ins_encode %{
 9298     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9299   %}
 9300 
 9301   ins_pipe(pipe_cmp_branch);
 9302   ins_short_branch(1);
 9303 %}
 9304 
 9305 instruct cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
 9306 %{
 9307   // Same match rule as `far_cmpL_loop'.
 9308   match(CountedLoopEnd cmp (CmpL op1 op2));
 9309 
 9310   effect(USE lbl);
 9311 
 9312   ins_cost(BRANCH_COST);
 9313 
 9314   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpL_loop" %}
 9315 
 9316   ins_encode %{
 9317     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
 9318   %}
 9319 
 9320   ins_pipe(pipe_cmp_branch);
 9321   ins_short_branch(1);
 9322 %}
 9323 
 9324 // Compare unsigned long and branch near instructions
 9325 instruct cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl)
 9326 %{
 9327   // Same match rule as `far_cmpUL_branch'.
 9328   match(If cmp (CmpUL op1 op2));
 9329 
 9330   effect(USE lbl);
 9331 
 9332   ins_cost(BRANCH_COST);
 9333   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpUL_branch" %}
 9334 
 9335   ins_encode %{
 9336     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9337                   as_Register($op2$$reg), *($lbl$$label));
 9338   %}
 9339 
 9340   ins_pipe(pipe_cmp_branch);
 9341   ins_short_branch(1);
 9342 %}
 9343 
 9344 // Compare pointer and branch near instructions
 9345 instruct cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
 9346 %{
 9347   // Same match rule as `far_cmpP_branch'.
 9348   match(If cmp (CmpP op1 op2));
 9349 
 9350   effect(USE lbl);
 9351 
 9352   ins_cost(BRANCH_COST);
 9353 
 9354   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpP_branch" %}
 9355 
 9356   ins_encode %{
 9357     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9358                   as_Register($op2$$reg), *($lbl$$label));
 9359   %}
 9360 
 9361   ins_pipe(pipe_cmp_branch);
 9362   ins_short_branch(1);
 9363 %}
 9364 
 9365 // Compare narrow pointer and branch near instructions
 9366 instruct cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
 9367 %{
 9368   // Same match rule as `far_cmpN_branch'.
 9369   match(If cmp (CmpN op1 op2));
 9370 
 9371   effect(USE lbl);
 9372 
 9373   ins_cost(BRANCH_COST);
 9374 
 9375   format %{ "b$cmp  $op1, $op2, $lbl\t#@cmpN_branch" %}
 9376 
 9377   ins_encode %{
 9378     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9379                   as_Register($op2$$reg), *($lbl$$label));
 9380   %}
 9381 
 9382   ins_pipe(pipe_cmp_branch);
 9383   ins_short_branch(1);
 9384 %}
 9385 
 9386 // Compare float and branch near instructions
 9387 instruct cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
 9388 %{
 9389   // Same match rule as `far_cmpF_branch'.
 9390   match(If cmp (CmpF op1 op2));
 9391 
 9392   effect(USE lbl);
 9393 
 9394   ins_cost(XFER_COST + BRANCH_COST);
 9395   format %{ "float_b$cmp $op1, $op2, $lbl \t#@cmpF_branch"%}
 9396 
 9397   ins_encode %{
 9398     __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), *($lbl$$label));
 9399   %}
 9400 
 9401   ins_pipe(pipe_class_compare);
 9402   ins_short_branch(1);
 9403 %}
 9404 
 9405 // Compare double and branch near instructions
 9406 instruct cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
 9407 %{
 9408   // Same match rule as `far_cmpD_branch'.
 9409   match(If cmp (CmpD op1 op2));
 9410   effect(USE lbl);
 9411 
 9412   ins_cost(XFER_COST + BRANCH_COST);
 9413   format %{ "double_b$cmp $op1, $op2, $lbl\t#@cmpD_branch"%}
 9414 
 9415   ins_encode %{
 9416     __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
 9417                         as_FloatRegister($op2$$reg), *($lbl$$label));
 9418   %}
 9419 
 9420   ins_pipe(pipe_class_compare);
 9421   ins_short_branch(1);
 9422 %}
 9423 
 9424 // Compare signed int with zero and branch near instructions
 9425 instruct cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9426 %{
 9427   // Same match rule as `far_cmpI_reg_imm0_branch'.
 9428   match(If cmp (CmpI op1 zero));
 9429 
 9430   effect(USE op1, USE lbl);
 9431 
 9432   ins_cost(BRANCH_COST);
 9433   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpI_reg_imm0_branch" %}
 9434 
 9435   ins_encode %{
 9436     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9437   %}
 9438 
 9439   ins_pipe(pipe_cmpz_branch);
 9440   ins_short_branch(1);
 9441 %}
 9442 
 9443 instruct cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9444 %{
 9445   // Same match rule as `far_cmpI_reg_imm0_loop'.
 9446   match(CountedLoopEnd cmp (CmpI op1 zero));
 9447 
 9448   effect(USE op1, USE lbl);
 9449 
 9450   ins_cost(BRANCH_COST);
 9451 
 9452   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpI_reg_imm0_loop" %}
 9453 
 9454   ins_encode %{
 9455     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9456   %}
 9457 
 9458   ins_pipe(pipe_cmpz_branch);
 9459   ins_short_branch(1);
 9460 %}
 9461 
 9462 // Compare unsigned int with zero and branch near instructions
 9463 instruct cmpUEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
 9464 %{
 9465   // Same match rule as `far_cmpUEqNeLeGt_reg_imm0_branch'.
 9466   match(If cmp (CmpU op1 zero));
 9467 
 9468   effect(USE op1, USE lbl);
 9469 
 9470   ins_cost(BRANCH_COST);
 9471 
 9472   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpUEqNeLeGt_reg_imm0_branch" %}
 9473 
 9474   ins_encode %{
 9475     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9476   %}
 9477 
 9478   ins_pipe(pipe_cmpz_branch);
 9479   ins_short_branch(1);
 9480 %}
 9481 
 9482 // Compare signed long with zero and branch near instructions
 9483 instruct cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9484 %{
 9485   // Same match rule as `far_cmpL_reg_imm0_branch'.
 9486   match(If cmp (CmpL op1 zero));
 9487 
 9488   effect(USE op1, USE lbl);
 9489 
 9490   ins_cost(BRANCH_COST);
 9491 
 9492   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpL_reg_imm0_branch" %}
 9493 
 9494   ins_encode %{
 9495     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9496   %}
 9497 
 9498   ins_pipe(pipe_cmpz_branch);
 9499   ins_short_branch(1);
 9500 %}
 9501 
 9502 instruct cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9503 %{
 9504   // Same match rule as `far_cmpL_reg_imm0_loop'.
 9505   match(CountedLoopEnd cmp (CmpL op1 zero));
 9506 
 9507   effect(USE op1, USE lbl);
 9508 
 9509   ins_cost(BRANCH_COST);
 9510 
 9511   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpL_reg_imm0_loop" %}
 9512 
 9513   ins_encode %{
 9514     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
 9515   %}
 9516 
 9517   ins_pipe(pipe_cmpz_branch);
 9518   ins_short_branch(1);
 9519 %}
 9520 
 9521 // Compare unsigned long with zero and branch near instructions
 9522 instruct cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
 9523 %{
 9524   // Same match rule as `far_cmpULEqNeLeGt_reg_imm0_branch'.
 9525   match(If cmp (CmpUL op1 zero));
 9526 
 9527   effect(USE op1, USE lbl);
 9528 
 9529   ins_cost(BRANCH_COST);
 9530 
 9531   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpULEqNeLeGt_reg_imm0_branch" %}
 9532 
 9533   ins_encode %{
 9534     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9535   %}
 9536 
 9537   ins_pipe(pipe_cmpz_branch);
 9538   ins_short_branch(1);
 9539 %}
 9540 
 9541 // Compare pointer with zero and branch near instructions
 9542 instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
 9543   // Same match rule as `far_cmpP_reg_imm0_branch'.
 9544   match(If cmp (CmpP op1 zero));
 9545   effect(USE lbl);
 9546 
 9547   ins_cost(BRANCH_COST);
 9548   format %{ "b$cmp   $op1, zr, $lbl\t#@cmpP_imm0_branch" %}
 9549 
 9550   ins_encode %{
 9551     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9552   %}
 9553 
 9554   ins_pipe(pipe_cmpz_branch);
 9555   ins_short_branch(1);
 9556 %}
 9557 
 9558 // Compare narrow pointer with zero and branch near instructions
 9559 instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
 9560   // Same match rule as `far_cmpN_reg_imm0_branch'.
 9561   match(If cmp (CmpN op1 zero));
 9562   effect(USE lbl);
 9563 
 9564   ins_cost(BRANCH_COST);
 9565 
 9566   format %{ "b$cmp  $op1, zr, $lbl\t#@cmpN_imm0_branch" %}
 9567 
 9568   ins_encode %{
 9569     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9570   %}
 9571 
 9572   ins_pipe(pipe_cmpz_branch);
 9573   ins_short_branch(1);
 9574 %}
 9575 
 9576 // Compare narrow pointer with pointer zero and branch near instructions
 9577 instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
 9578   // Same match rule as `far_cmpP_narrowOop_imm0_branch'.
 9579   match(If cmp (CmpP (DecodeN op1) zero));
 9580   effect(USE lbl);
 9581 
 9582   ins_cost(BRANCH_COST);
 9583   format %{ "b$cmp   $op1, zr, $lbl\t#@cmpP_narrowOop_imm0_branch" %}
 9584 
 9585   ins_encode %{
 9586     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
 9587   %}
 9588 
 9589   ins_pipe(pipe_cmpz_branch);
 9590   ins_short_branch(1);
 9591 %}
 9592 
// Patterns for far variants, where the branch target may be beyond the
// conditional-branch reach; these expand to an inverted conditional branch
// over an unconditional jump
 9594 
 9595 instruct far_cmpFlag_branch(cmpOp cmp, rFlagsReg cr, label lbl) %{
 9596   match(If cmp cr);
 9597   effect(USE lbl);
 9598 
 9599   ins_cost(BRANCH_COST);
 9600   format %{ "far_b$cmp $cr, zr, $lbl\t#@far_cmpFlag_branch"%}
 9601 
 9602   ins_encode %{
 9603     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label), /* is_far */ true);
 9604   %}
 9605 
 9606   ins_pipe(pipe_cmpz_branch);
 9607 %}
 9608 
 9609 // Compare signed int and branch far instructions
 9610 instruct far_cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
 9611   match(If cmp (CmpI op1 op2));
 9612   effect(USE lbl);
 9613 
 9614   ins_cost(BRANCH_COST * 2);
 9615 
  // The [far_b$cmp] in the format above is emitted as two instructions by the
  // macro assembler: b$not_cmp(op1, op2, done), j($lbl), bind(done)
 9618   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpI_branch" %}
 9619 
 9620   ins_encode %{
 9621     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9622   %}
 9623 
 9624   ins_pipe(pipe_cmp_branch);
 9625 %}
 9626 
 9627 instruct far_cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
 9628   match(CountedLoopEnd cmp (CmpI op1 op2));
 9629   effect(USE lbl);
 9630 
 9631   ins_cost(BRANCH_COST * 2);
 9632   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpI_loop" %}
 9633 
 9634   ins_encode %{
 9635     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9636   %}
 9637 
 9638   ins_pipe(pipe_cmp_branch);
 9639 %}
 9640 
 9641 instruct far_cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) %{
 9642   match(If cmp (CmpU op1 op2));
 9643   effect(USE lbl);
 9644 
 9645   ins_cost(BRANCH_COST * 2);
 9646   format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpU_branch" %}
 9647 
 9648   ins_encode %{
 9649     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9650                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9651   %}
 9652 
 9653   ins_pipe(pipe_cmp_branch);
 9654 %}
 9655 
 9656 instruct far_cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
 9657   match(If cmp (CmpL op1 op2));
 9658   effect(USE lbl);
 9659 
 9660   ins_cost(BRANCH_COST * 2);
 9661   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpL_branch" %}
 9662 
 9663   ins_encode %{
 9664     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9665   %}
 9666 
 9667   ins_pipe(pipe_cmp_branch);
 9668 %}
 9669 
instruct far_cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
 9671   match(CountedLoopEnd cmp (CmpL op1 op2));
 9672   effect(USE lbl);
 9673 
 9674   ins_cost(BRANCH_COST * 2);
 9675   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpL_loop" %}
 9676 
 9677   ins_encode %{
 9678     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9679   %}
 9680 
 9681   ins_pipe(pipe_cmp_branch);
 9682 %}
 9683 
 9684 instruct far_cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) %{
 9685   match(If cmp (CmpUL op1 op2));
 9686   effect(USE lbl);
 9687 
 9688   ins_cost(BRANCH_COST * 2);
 9689   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpUL_branch" %}
 9690 
 9691   ins_encode %{
 9692     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9693                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9694   %}
 9695 
 9696   ins_pipe(pipe_cmp_branch);
 9697 %}
 9698 
 9699 instruct far_cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
 9700 %{
 9701   match(If cmp (CmpP op1 op2));
 9702 
 9703   effect(USE lbl);
 9704 
 9705   ins_cost(BRANCH_COST * 2);
 9706 
 9707   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpP_branch" %}
 9708 
 9709   ins_encode %{
 9710     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9711                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9712   %}
 9713 
 9714   ins_pipe(pipe_cmp_branch);
 9715 %}
 9716 
 9717 instruct far_cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
 9718 %{
 9719   match(If cmp (CmpN op1 op2));
 9720 
 9721   effect(USE lbl);
 9722 
 9723   ins_cost(BRANCH_COST * 2);
 9724 
 9725   format %{ "far_b$cmp  $op1, $op2, $lbl\t#@far_cmpN_branch" %}
 9726 
 9727   ins_encode %{
 9728     __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
 9729                        as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
 9730   %}
 9731 
 9732   ins_pipe(pipe_cmp_branch);
 9733 %}
 9734 
 9735 // Float compare and branch instructions
 9736 instruct far_cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
 9737 %{
 9738   match(If cmp (CmpF op1 op2));
 9739 
 9740   effect(USE lbl);
 9741 
 9742   ins_cost(XFER_COST + BRANCH_COST * 2);
 9743   format %{ "far_float_b$cmp $op1, $op2, $lbl\t#@far_cmpF_branch"%}
 9744 
 9745   ins_encode %{
 9746     __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
 9747                         *($lbl$$label), /* is_far */ true);
 9748   %}
 9749 
 9750   ins_pipe(pipe_class_compare);
 9751 %}
 9752 
 9753 // Double compare and branch instructions
 9754 instruct far_cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
 9755 %{
 9756   match(If cmp (CmpD op1 op2));
 9757   effect(USE lbl);
 9758 
 9759   ins_cost(XFER_COST + BRANCH_COST * 2);
 9760   format %{ "far_double_b$cmp $op1, $op2, $lbl\t#@far_cmpD_branch"%}
 9761 
 9762   ins_encode %{
 9763     __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
 9764                         as_FloatRegister($op2$$reg), *($lbl$$label), /* is_far */ true);
 9765   %}
 9766 
 9767   ins_pipe(pipe_class_compare);
 9768 %}
 9769 
 9770 instruct far_cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9771 %{
 9772   match(If cmp (CmpI op1 zero));
 9773 
 9774   effect(USE op1, USE lbl);
 9775 
 9776   ins_cost(BRANCH_COST * 2);
 9777 
 9778   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpI_reg_imm0_branch" %}
 9779 
 9780   ins_encode %{
 9781     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9782   %}
 9783 
 9784   ins_pipe(pipe_cmpz_branch);
 9785 %}
 9786 
 9787 instruct far_cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
 9788 %{
 9789   match(CountedLoopEnd cmp (CmpI op1 zero));
 9790 
 9791   effect(USE op1, USE lbl);
 9792 
 9793   ins_cost(BRANCH_COST * 2);
 9794 
 9795   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpI_reg_imm0_loop" %}
 9796 
 9797   ins_encode %{
 9798     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9799   %}
 9800 
 9801   ins_pipe(pipe_cmpz_branch);
 9802 %}
 9803 
 9804 instruct far_cmpUEqNeLeGt_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
 9805 %{
 9806   match(If cmp (CmpU op1 zero));
 9807 
 9808   effect(USE op1, USE lbl);
 9809 
 9810   ins_cost(BRANCH_COST * 2);
 9811 
 9812   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpUEqNeLeGt_imm0_branch" %}
 9813 
 9814   ins_encode %{
 9815     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9816   %}
 9817 
 9818   ins_pipe(pipe_cmpz_branch);
 9819 %}
 9820 
// Unsigned lt/ge compares against zero have no short-branch instruct with the same match rule
 9822 instruct far_cmpULtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegI op1, immI0 zero, label lbl)
 9823 %{
 9824   match(If cmp (CmpU op1 zero));
 9825 
 9826   effect(USE op1, USE lbl);
 9827 
 9828   ins_cost(BRANCH_COST);
 9829 
 9830   format %{ "j  $lbl if $cmp == ge\t#@far_cmpULtGe_reg_imm0_branch" %}
 9831 
 9832   ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
 9833 
 9834   ins_pipe(pipe_cmpz_branch);
 9835 %}
 9836 
 9837 instruct far_cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9838 %{
 9839   match(If cmp (CmpL op1 zero));
 9840 
 9841   effect(USE op1, USE lbl);
 9842 
 9843   ins_cost(BRANCH_COST * 2);
 9844 
 9845   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpL_reg_imm0_branch" %}
 9846 
 9847   ins_encode %{
 9848     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9849   %}
 9850 
 9851   ins_pipe(pipe_cmpz_branch);
 9852 %}
 9853 
 9854 instruct far_cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
 9855 %{
 9856   match(CountedLoopEnd cmp (CmpL op1 zero));
 9857 
 9858   effect(USE op1, USE lbl);
 9859 
 9860   ins_cost(BRANCH_COST * 2);
 9861 
 9862   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpL_reg_imm0_loop" %}
 9863 
 9864   ins_encode %{
 9865     __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
 9866   %}
 9867 
 9868   ins_pipe(pipe_cmpz_branch);
 9869 %}
 9870 
 9871 instruct far_cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
 9872 %{
 9873   match(If cmp (CmpUL op1 zero));
 9874 
 9875   effect(USE op1, USE lbl);
 9876 
 9877   ins_cost(BRANCH_COST * 2);
 9878 
 9879   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpULEqNeLeGt_reg_imm0_branch" %}
 9880 
 9881   ins_encode %{
 9882     __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9883   %}
 9884 
 9885   ins_pipe(pipe_cmpz_branch);
 9886 %}
 9887 
// Unsigned lt/ge compares against zero have no short-branch instruct with the same match rule
 9889 instruct far_cmpULLtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegL op1, immL0 zero, label lbl)
 9890 %{
 9891   match(If cmp (CmpUL op1 zero));
 9892 
 9893   effect(USE op1, USE lbl);
 9894 
 9895   ins_cost(BRANCH_COST);
 9896 
 9897   format %{ "j  $lbl if $cmp == ge\t#@far_cmpULLtGe_reg_imm0_branch" %}
 9898 
 9899   ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));
 9900 
 9901   ins_pipe(pipe_cmpz_branch);
 9902 %}
 9903 
 9904 instruct far_cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
 9905   match(If cmp (CmpP op1 zero));
 9906   effect(USE lbl);
 9907 
 9908   ins_cost(BRANCH_COST * 2);
 9909   format %{ "far_b$cmp   $op1, zr, $lbl\t#@far_cmpP_imm0_branch" %}
 9910 
 9911   ins_encode %{
 9912     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9913   %}
 9914 
 9915   ins_pipe(pipe_cmpz_branch);
 9916 %}
 9917 
 9918 instruct far_cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
 9919   match(If cmp (CmpN op1 zero));
 9920   effect(USE lbl);
 9921 
 9922   ins_cost(BRANCH_COST * 2);
 9923 
 9924   format %{ "far_b$cmp  $op1, zr, $lbl\t#@far_cmpN_imm0_branch" %}
 9925 
 9926   ins_encode %{
 9927     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9928   %}
 9929 
 9930   ins_pipe(pipe_cmpz_branch);
 9931 %}
 9932 
 9933 instruct far_cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
 9934   match(If cmp (CmpP (DecodeN op1) zero));
 9935   effect(USE lbl);
 9936 
 9937   ins_cost(BRANCH_COST * 2);
 9938   format %{ "far_b$cmp   $op1, zr, $lbl\t#@far_cmpP_narrowOop_imm0_branch" %}
 9939 
 9940   ins_encode %{
 9941     __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
 9942   %}
 9943 
 9944   ins_pipe(pipe_cmpz_branch);
 9945 %}
 9946 
 9947 // ============================================================================
 9948 // Conditional Move Instructions
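//
// The enc_cmove / enc_cmove_cmp_fp helpers used below presumably expand to a
// conditional branch over a single register move (consistent with the
// ALU_COST + BRANCH_COST cost), along the lines of this sketch, or to an
// equivalent branchless sequence where the hardware supports one:
//
//   b<negated cop>  op1, op2, skip
//   mv              dst, src
// skip: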
 9949 
 9950 // --------- CMoveI ---------
 9951 
 9952 instruct cmovI_cmpI(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOp cop) %{
 9953   match(Set dst (CMoveI (Binary cop (CmpI op1 op2)) (Binary dst src)));
 9954   ins_cost(ALU_COST + BRANCH_COST);
 9955 
 9956   format %{
 9957     "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpI\n\t"
 9958   %}
 9959 
 9960   ins_encode %{
 9961     __ enc_cmove($cop$$cmpcode,
 9962                  as_Register($op1$$reg), as_Register($op2$$reg),
 9963                  as_Register($dst$$reg), as_Register($src$$reg));
 9964   %}
 9965 
 9966   ins_pipe(pipe_class_compare);
 9967 %}
 9968 
 9969 instruct cmovI_cmpU(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOpU cop) %{
 9970   match(Set dst (CMoveI (Binary cop (CmpU op1 op2)) (Binary dst src)));
 9971   ins_cost(ALU_COST + BRANCH_COST);
 9972 
 9973   format %{
 9974     "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpU\n\t"
 9975   %}
 9976 
 9977   ins_encode %{
 9978     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
 9979                  as_Register($op1$$reg), as_Register($op2$$reg),
 9980                  as_Register($dst$$reg), as_Register($src$$reg));
 9981   %}
 9982 
 9983   ins_pipe(pipe_class_compare);
 9984 %}
 9985 
 9986 instruct cmovI_cmpL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOp cop) %{
 9987   match(Set dst (CMoveI (Binary cop (CmpL op1 op2)) (Binary dst src)));
 9988   ins_cost(ALU_COST + BRANCH_COST);
 9989 
 9990   format %{
 9991     "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpL\n\t"
 9992   %}
 9993 
 9994   ins_encode %{
 9995     __ enc_cmove($cop$$cmpcode,
 9996                  as_Register($op1$$reg), as_Register($op2$$reg),
 9997                  as_Register($dst$$reg), as_Register($src$$reg));
 9998   %}
 9999 
10000   ins_pipe(pipe_class_compare);
10001 %}
10002 
10003 instruct cmovI_cmpUL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOpU cop) %{
10004   match(Set dst (CMoveI (Binary cop (CmpUL op1 op2)) (Binary dst src)));
10005   ins_cost(ALU_COST + BRANCH_COST);
10006 
10007   format %{
10008     "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpUL\n\t"
10009   %}
10010 
10011   ins_encode %{
10012     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10013                  as_Register($op1$$reg), as_Register($op2$$reg),
10014                  as_Register($dst$$reg), as_Register($src$$reg));
10015   %}
10016 
10017   ins_pipe(pipe_class_compare);
10018 %}
10019 
10020 instruct cmovI_cmpF(iRegINoSp dst, iRegI src, fRegF op1, fRegF op2, cmpOp cop) %{
10021   match(Set dst (CMoveI (Binary cop (CmpF op1 op2)) (Binary dst src)));
10022   ins_cost(ALU_COST + BRANCH_COST);
10023 
10024   format %{
10025     "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpF\n\t"
10026   %}
10027 
10028   ins_encode %{
10029     __ enc_cmove_cmp_fp($cop$$cmpcode,
10030                         as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
10031                         as_Register($dst$$reg), as_Register($src$$reg), true /* is_single */);
10032   %}
10033 
10034   ins_pipe(pipe_class_compare);
10035 %}
10036 
10037 instruct cmovI_cmpD(iRegINoSp dst, iRegI src, fRegD op1, fRegD op2, cmpOp cop) %{
10038   match(Set dst (CMoveI (Binary cop (CmpD op1 op2)) (Binary dst src)));
10039   ins_cost(ALU_COST + BRANCH_COST);
10040 
10041   format %{
10042     "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpD\n\t"
10043   %}
10044 
10045   ins_encode %{
10046     __ enc_cmove_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
10047                         as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
10048                         as_Register($dst$$reg), as_Register($src$$reg), false /* is_single */);
10049   %}
10050 
10051   ins_pipe(pipe_class_compare);
10052 %}
10053 
10054 instruct cmovI_cmpN(iRegINoSp dst, iRegI src, iRegN op1, iRegN op2, cmpOpU cop) %{
10055   match(Set dst (CMoveI (Binary cop (CmpN op1 op2)) (Binary dst src)));
10056   ins_cost(ALU_COST + BRANCH_COST);
10057 
10058   format %{
10059     "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpN\n\t"
10060   %}
10061 
10062   ins_encode %{
10063     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10064                  as_Register($op1$$reg), as_Register($op2$$reg),
10065                  as_Register($dst$$reg), as_Register($src$$reg));
10066   %}
10067 
10068   ins_pipe(pipe_class_compare);
10069 %}
10070 
10071 instruct cmovI_cmpP(iRegINoSp dst, iRegI src, iRegP op1, iRegP op2, cmpOpU cop) %{
10072   match(Set dst (CMoveI (Binary cop (CmpP op1 op2)) (Binary dst src)));
10073   ins_cost(ALU_COST + BRANCH_COST);
10074 
10075   format %{
10076     "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpP\n\t"
10077   %}
10078 
10079   ins_encode %{
10080     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10081                  as_Register($op1$$reg), as_Register($op2$$reg),
10082                  as_Register($dst$$reg), as_Register($src$$reg));
10083   %}
10084 
10085   ins_pipe(pipe_class_compare);
10086 %}
10087 
10088 // --------- CMoveL ---------
10089 
10090 instruct cmovL_cmpL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOp cop) %{
10091   match(Set dst (CMoveL (Binary cop (CmpL op1 op2)) (Binary dst src)));
10092   ins_cost(ALU_COST + BRANCH_COST);
10093 
10094   format %{
10095     "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpL\n\t"
10096   %}
10097 
10098   ins_encode %{
10099     __ enc_cmove($cop$$cmpcode,
10100                  as_Register($op1$$reg), as_Register($op2$$reg),
10101                  as_Register($dst$$reg), as_Register($src$$reg));
10102   %}
10103 
10104   ins_pipe(pipe_class_compare);
10105 %}
10106 
10107 instruct cmovL_cmpUL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOpU cop) %{
10108   match(Set dst (CMoveL (Binary cop (CmpUL op1 op2)) (Binary dst src)));
10109   ins_cost(ALU_COST + BRANCH_COST);
10110 
10111   format %{
10112     "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpUL\n\t"
10113   %}
10114 
10115   ins_encode %{
10116     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10117                  as_Register($op1$$reg), as_Register($op2$$reg),
10118                  as_Register($dst$$reg), as_Register($src$$reg));
10119   %}
10120 
10121   ins_pipe(pipe_class_compare);
10122 %}
10123 
10124 instruct cmovL_cmpI(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOp cop) %{
10125   match(Set dst (CMoveL (Binary cop (CmpI op1 op2)) (Binary dst src)));
10126   ins_cost(ALU_COST + BRANCH_COST);
10127 
10128   format %{
10129     "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpI\n\t"
10130   %}
10131 
10132   ins_encode %{
10133     __ enc_cmove($cop$$cmpcode,
10134                  as_Register($op1$$reg), as_Register($op2$$reg),
10135                  as_Register($dst$$reg), as_Register($src$$reg));
10136   %}
10137 
10138   ins_pipe(pipe_class_compare);
10139 %}
10140 
10141 instruct cmovL_cmpU(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOpU cop) %{
10142   match(Set dst (CMoveL (Binary cop (CmpU op1 op2)) (Binary dst src)));
10143   ins_cost(ALU_COST + BRANCH_COST);
10144 
10145   format %{
10146     "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpU\n\t"
10147   %}
10148 
10149   ins_encode %{
10150     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10151                  as_Register($op1$$reg), as_Register($op2$$reg),
10152                  as_Register($dst$$reg), as_Register($src$$reg));
10153   %}
10154 
10155   ins_pipe(pipe_class_compare);
10156 %}
10157 
10158 instruct cmovL_cmpF(iRegLNoSp dst, iRegL src, fRegF op1, fRegF op2, cmpOp cop) %{
10159   match(Set dst (CMoveL (Binary cop (CmpF op1 op2)) (Binary dst src)));
10160   ins_cost(ALU_COST + BRANCH_COST);
10161 
10162   format %{
10163     "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpF\n\t"
10164   %}
10165 
10166   ins_encode %{
10167     __ enc_cmove_cmp_fp($cop$$cmpcode,
10168                         as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
10169                         as_Register($dst$$reg), as_Register($src$$reg), true /* is_single */);
10170   %}
10171 
10172   ins_pipe(pipe_class_compare);
10173 %}
10174 
10175 instruct cmovL_cmpD(iRegLNoSp dst, iRegL src, fRegD op1, fRegD op2, cmpOp cop) %{
10176   match(Set dst (CMoveL (Binary cop (CmpD op1 op2)) (Binary dst src)));
10177   ins_cost(ALU_COST + BRANCH_COST);
10178 
10179   format %{
10180     "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpD\n\t"
10181   %}
10182 
10183   ins_encode %{
10184     __ enc_cmove_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
10185                         as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
10186                         as_Register($dst$$reg), as_Register($src$$reg), false /* is_single */);
10187   %}
10188 
10189   ins_pipe(pipe_class_compare);
10190 %}
10191 
10192 instruct cmovL_cmpN(iRegLNoSp dst, iRegL src, iRegN op1, iRegN op2, cmpOpU cop) %{
10193   match(Set dst (CMoveL (Binary cop (CmpN op1 op2)) (Binary dst src)));
10194   ins_cost(ALU_COST + BRANCH_COST);
10195 
10196   format %{
10197     "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpN\n\t"
10198   %}
10199 
10200   ins_encode %{
10201     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10202                  as_Register($op1$$reg), as_Register($op2$$reg),
10203                  as_Register($dst$$reg), as_Register($src$$reg));
10204   %}
10205 
10206   ins_pipe(pipe_class_compare);
10207 %}
10208 
10209 instruct cmovL_cmpP(iRegLNoSp dst, iRegL src, iRegP op1, iRegP op2, cmpOpU cop) %{
10210   match(Set dst (CMoveL (Binary cop (CmpP op1 op2)) (Binary dst src)));
10211   ins_cost(ALU_COST + BRANCH_COST);
10212 
10213   format %{
10214     "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpP\n\t"
10215   %}
10216 
10217   ins_encode %{
10218     __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10219                  as_Register($op1$$reg), as_Register($op2$$reg),
10220                  as_Register($dst$$reg), as_Register($src$$reg));
10221   %}
10222 
10223   ins_pipe(pipe_class_compare);
10224 %}
10225 
10226 // ============================================================================
10227 // Procedure Call/Return Instructions
10228 
10229 // Call Java Static Instruction
10230 // Note: If this code changes, the corresponding ret_addr_offset() and
10231 //       compute_padding() functions will have to be adjusted.
10232 instruct CallStaticJavaDirect(method meth)
10233 %{
10234   match(CallStaticJava);
10235 
10236   effect(USE meth);
10237 
10238   ins_cost(BRANCH_COST);
10239 
10240   format %{ "CALL,static $meth\t#@CallStaticJavaDirect" %}
10241 
10242   ins_encode(riscv_enc_java_static_call(meth),
10243              riscv_enc_call_epilog);
10244 
10245   ins_pipe(pipe_class_call);
10246   ins_alignment(4);
10247 %}
10248 
10251 // Call Java Dynamic Instruction
10252 // Note: If this code changes, the corresponding ret_addr_offset() and
10253 //       compute_padding() functions will have to be adjusted.
10254 instruct CallDynamicJavaDirect(method meth)
10255 %{
10256   match(CallDynamicJava);
10257 
10258   effect(USE meth);
10259 
10260   ins_cost(BRANCH_COST + ALU_COST * 5);
10261 
10262   format %{ "CALL,dynamic $meth\t#@CallDynamicJavaDirect" %}
10263 
10264   ins_encode(riscv_enc_java_dynamic_call(meth),
10265              riscv_enc_call_epilog);
10266 
10267   ins_pipe(pipe_class_call);
10268   ins_alignment(4);
10269 %}
10270 
10271 // Call Runtime Instruction
10272 
10273 instruct CallRuntimeDirect(method meth)
10274 %{
10275   match(CallRuntime);
10276 
10277   effect(USE meth);
10278 
10279   ins_cost(BRANCH_COST);
10280 
10281   format %{ "CALL, runtime $meth\t#@CallRuntimeDirect" %}
10282 
10283   ins_encode(riscv_enc_java_to_runtime(meth));
10284 
10285   ins_pipe(pipe_class_call);
10286 %}
10287 
10288 // Call Runtime Instruction
10289 
10290 instruct CallLeafDirect(method meth)
10291 %{
10292   match(CallLeaf);
10293 
10294   effect(USE meth);
10295 
10296   ins_cost(BRANCH_COST);
10297 
10298   format %{ "CALL, runtime leaf $meth\t#@CallLeafDirect" %}
10299 
10300   ins_encode(riscv_enc_java_to_runtime(meth));
10301 
10302   ins_pipe(pipe_class_call);
10303 %}
10304 
// Call Runtime Instruction without a safepoint and with vector arguments
10306 
10307 instruct CallLeafDirectVector(method meth)
10308 %{
10309   match(CallLeafVector);
10310 
10311   effect(USE meth);
10312 
10313   ins_cost(BRANCH_COST);
10314 
  format %{ "CALL, runtime leaf vector $meth\t#@CallLeafDirectVector" %}
10316 
10317   ins_encode(riscv_enc_java_to_runtime(meth));
10318 
10319   ins_pipe(pipe_class_call);
10320 %}
10321 
10322 // Call Runtime Instruction
10323 
10324 instruct CallLeafNoFPDirect(method meth)
10325 %{
10326   match(CallLeafNoFP);
10327 
10328   effect(USE meth);
10329 
10330   ins_cost(BRANCH_COST);
10331 
10332   format %{ "CALL, runtime leaf nofp $meth\t#@CallLeafNoFPDirect" %}
10333 
10334   ins_encode(riscv_enc_java_to_runtime(meth));
10335 
10336   ins_pipe(pipe_class_call);
10337 %}
10338 
10339 // ============================================================================
10340 // Partial Subtype Check
10341 //
// Scan the subklass's secondary supers array for an instance of the
// superklass.  Set a hidden internal cache on a hit (cache is checked
// with exposed code in gen_subtype_check()).  Return zero for a hit.
// The encoding ALSO sets flags.
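//
// A hedged sketch of the scan, in plain Java for illustration only (the
// helper and the cache update are stand-ins, not HotSpot API):
//
//   static boolean isSecondarySuper(Class<?>[] secondarySupers, Class<?> superk) {
//     for (Class<?> s : secondarySupers) {
//       if (s == superk) {
//         return true;    // hit: the real code also records superk in the cache
//       }
//     }
//     return false;       // miss: the result register is left non-zero
//   }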
10346 
10347 instruct partialSubtypeCheck(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp, rFlagsReg cr)
10348 %{
10349   predicate(!UseSecondarySupersTable);
10350   match(Set result (PartialSubtypeCheck sub super));
10351   effect(KILL tmp, KILL cr);
10352 
10353   ins_cost(20 * DEFAULT_COST);
10354   format %{ "partialSubtypeCheck $result, $sub, $super\t#@partialSubtypeCheck" %}
10355 
10356   ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result));
10357 
10358   opcode(0x1); // Force zero of result reg on hit
10359 
10360   ins_pipe(pipe_class_memory);
10361 %}
10362 
10363 // Two versions of partialSubtypeCheck, both used when we need to
10364 // search for a super class in the secondary supers array. The first
10365 // is used when we don't know _a priori_ the class being searched
10366 // for. The second, far more common, is used when we do know: this is
10367 // used for instanceof, checkcast, and any case where C2 can determine
10368 // it by constant propagation.
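//
// Illustrative Java only (the actual selection also depends on what C2 can
// prove and on UseSecondarySupersTable / InlineSecondarySupersTest):
//
//   static boolean knownSuper(Object o) {
//     return o instanceof java.io.Serializable;  // superklass is a compile-time
//   }                                            // constant -> ...ConstSuper
//
//   static boolean unknownSuper(Class<?> c, Object o) {
//     return c.isInstance(o);                    // superklass only known at run
//   }                                            // time -> ...VarSuper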
10369 
10370 instruct partialSubtypeCheckVarSuper(iRegP_R14 sub, iRegP_R10 super, iRegP_R15 result,
10371                                      iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13,
10372                                      iRegP_R16 tmpR16, rFlagsReg cr)
10373 %{
10374   predicate(UseSecondarySupersTable);
10375   match(Set result (PartialSubtypeCheck sub super));
10376   effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);
10377 
10378   ins_cost(10 * DEFAULT_COST);  // slightly larger than the next version
10379   format %{ "partialSubtypeCheck $result, $sub, $super" %}
10380 
10381   ins_encode %{
10382     __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register, $result$$Register,
10383                                          $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
10384                                          $tmpR16$$Register, nullptr /*L_success*/);
10385   %}
10386 
10387   ins_pipe(pipe_class_memory);
10388 %}
10389 
10390 instruct partialSubtypeCheckConstSuper(iRegP_R14 sub, iRegP_R10 super_reg, immP super_con, iRegP_R15 result,
10391                                        iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13, iRegP_R16 tmpR16, rFlagsReg cr)
10392 %{
10393   predicate(UseSecondarySupersTable);
10394   match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
10395   effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);
10396 
10397   ins_cost(5 * DEFAULT_COST); // needs to be less than competing nodes
10398   format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}
10399 
10400   ins_encode %{
10401     bool success = false;
10402     u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
10403     if (InlineSecondarySupersTest) {
10404       success = __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register, $result$$Register,
10405                                                        $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
10406                                                        $tmpR16$$Register, super_klass_slot);
10407     } else {
10408       address call = __ reloc_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
10409       success = (call != nullptr);
10410     }
10411     if (!success) {
10412       ciEnv::current()->record_failure("CodeCache is full");
10413       return;
10414     }
10415   %}
10416 
10417   ins_pipe(pipe_class_memory);
10418 %}
10419 
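// In the string intrinsics below, the two-letter suffix names the
// compact-strings encodings of the two operands: U = UTF-16, L = Latin-1.
// A hedged, illustrative example (which variant is reached is decided by the
// library code and C2, not by this file):
//
//   int c = "abc".compareTo("abd");       // both Latin-1      -> LL variant
//   int d = "abc".compareTo("ab\u20ac");  // Latin-1 vs UTF-16 -> LU/UL variant
//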
10420 instruct string_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10421                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10422 %{
10423   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU);
10424   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10425   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10426 
10427   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %}
10428   ins_encode %{
10429     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
10430     __ string_compare($str1$$Register, $str2$$Register,
10431                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10432                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10433                       StrIntrinsicNode::UU);
10434   %}
10435   ins_pipe(pipe_class_memory);
10436 %}
10437 
10438 instruct string_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10439                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10440 %{
10441   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL);
10442   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10443   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10444 
10445   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %}
10446   ins_encode %{
10447     __ string_compare($str1$$Register, $str2$$Register,
10448                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10449                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10450                       StrIntrinsicNode::LL);
10451   %}
10452   ins_pipe(pipe_class_memory);
10453 %}
10454 
10455 instruct string_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10456                           iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
10457 %{
10458   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL);
10459   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10460   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10461 
  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %}
10463   ins_encode %{
10464     __ string_compare($str1$$Register, $str2$$Register,
10465                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10466                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10467                       StrIntrinsicNode::UL);
10468   %}
10469   ins_pipe(pipe_class_memory);
10470 %}
10471 
10472 instruct string_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10473                           iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3,
10474                           rFlagsReg cr)
10475 %{
10476   predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU);
10477   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10478   effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
10479 
10480   format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %}
10481   ins_encode %{
10482     __ string_compare($str1$$Register, $str2$$Register,
10483                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
10484                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10485                       StrIntrinsicNode::LU);
10486   %}
10487   ins_pipe(pipe_class_memory);
10488 %}
10489 
10490 instruct string_indexofUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10491                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10492                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10493 %{
10494   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
10495   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10496   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
10497          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
10498 
10499   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}
10500   ins_encode %{
10501     __ string_indexof($str1$$Register, $str2$$Register,
10502                       $cnt1$$Register, $cnt2$$Register,
10503                       $tmp1$$Register, $tmp2$$Register,
10504                       $tmp3$$Register, $tmp4$$Register,
10505                       $tmp5$$Register, $tmp6$$Register,
10506                       $result$$Register, StrIntrinsicNode::UU);
10507   %}
10508   ins_pipe(pipe_class_memory);
10509 %}
10510 
10511 instruct string_indexofLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10512                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10513                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10514 %{
10515   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
10516   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10517   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
10518          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
10519 
10520   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}
10521   ins_encode %{
10522     __ string_indexof($str1$$Register, $str2$$Register,
10523                       $cnt1$$Register, $cnt2$$Register,
10524                       $tmp1$$Register, $tmp2$$Register,
10525                       $tmp3$$Register, $tmp4$$Register,
10526                       $tmp5$$Register, $tmp6$$Register,
10527                       $result$$Register, StrIntrinsicNode::LL);
10528   %}
10529   ins_pipe(pipe_class_memory);
10530 %}
10531 
10532 instruct string_indexofUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
10533                           iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
10534                           iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
10535 %{
10536   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
10537   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
10538   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
10539          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
10540   format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}
10541 
10542   ins_encode %{
10543     __ string_indexof($str1$$Register, $str2$$Register,
10544                       $cnt1$$Register, $cnt2$$Register,
10545                       $tmp1$$Register, $tmp2$$Register,
10546                       $tmp3$$Register, $tmp4$$Register,
10547                       $tmp5$$Register, $tmp6$$Register,
10548                       $result$$Register, StrIntrinsicNode::UL);
10549   %}
10550   ins_pipe(pipe_class_memory);
10551 %}
10552 
10553 instruct string_indexof_conUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10554                               immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10555                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10556 %{
10557   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
10558   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10559   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10560          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10561 
10562   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}
10563 
10564   ins_encode %{
10565     int icnt2 = (int)$int_cnt2$$constant;
10566     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10567                                  $cnt1$$Register, zr,
10568                                  $tmp1$$Register, $tmp2$$Register,
10569                                  $tmp3$$Register, $tmp4$$Register,
10570                                  icnt2, $result$$Register, StrIntrinsicNode::UU);
10571   %}
10572   ins_pipe(pipe_class_memory);
10573 %}
10574 
10575 instruct string_indexof_conLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10576                               immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10577                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10578 %{
10579   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
10580   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10581   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10582          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10583 
10584   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}
10585   ins_encode %{
10586     int icnt2 = (int)$int_cnt2$$constant;
10587     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10588                                  $cnt1$$Register, zr,
10589                                  $tmp1$$Register, $tmp2$$Register,
10590                                  $tmp3$$Register, $tmp4$$Register,
10591                                  icnt2, $result$$Register, StrIntrinsicNode::LL);
10592   %}
10593   ins_pipe(pipe_class_memory);
10594 %}
10595 
10596 instruct string_indexof_conUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
10597                               immI_1 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10598                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10599 %{
10600   predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
10601   match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
10602   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
10603          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10604 
10605   format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}
10606   ins_encode %{
10607     int icnt2 = (int)$int_cnt2$$constant;
10608     __ string_indexof_linearscan($str1$$Register, $str2$$Register,
10609                                  $cnt1$$Register, zr,
10610                                  $tmp1$$Register, $tmp2$$Register,
10611                                  $tmp3$$Register, $tmp4$$Register,
10612                                  icnt2, $result$$Register, StrIntrinsicNode::UL);
10613   %}
10614   ins_pipe(pipe_class_memory);
10615 %}
10616 
10617 instruct stringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
10618                               iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10619                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10620 %{
10621   match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
10622   predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
10623   effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
10624          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10625 
10626   format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
10627   ins_encode %{
10628     __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
10629                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, false /* isL */);
10631   %}
10632   ins_pipe(pipe_class_memory);
10633 %}
10634 
10635 
10636 instruct stringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
10637                               iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
10638                               iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
10639 %{
10640   match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
10641   predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
10642   effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
10643          TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
10644 
10645   format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
10646   ins_encode %{
10647     __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
10648                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
10649                            $tmp3$$Register, $tmp4$$Register, true /* isL */);
10650   %}
10651   ins_pipe(pipe_class_memory);
10652 %}
10653 
10654 // clearing of an array
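// A ClearArray node typically comes from the mandatory zeroing of newly
// allocated storage, e.g. (illustrative only):
//
//   long[] a = new long[n];   // the zero fill of a's body becomes a ClearArray
//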
10655 instruct clearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, iRegP_R30 tmp1,
10656                             iRegP_R31 tmp2, rFlagsReg cr, Universe dummy)
10657 %{
  // temp registers must match the ones used in StubGenerator::generate_zero_blocks()
10659   predicate(UseBlockZeroing || !UseRVV);
10660   match(Set dummy (ClearArray cnt base));
10661   effect(USE_KILL cnt, USE_KILL base, TEMP tmp1, TEMP tmp2, KILL cr);
10662 
10663   ins_cost(4 * DEFAULT_COST);
10664   format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %}
10665 
10666   ins_encode %{
10667     address tpc = __ zero_words($base$$Register, $cnt$$Register);
10668     if (tpc == nullptr) {
10669       ciEnv::current()->record_failure("CodeCache is full");
10670       return;
10671     }
10672   %}
10673 
10674   ins_pipe(pipe_class_memory);
10675 %}
10676 
10677 instruct clearArray_imm_reg(immL cnt, iRegP_R28 base, Universe dummy, rFlagsReg cr)
10678 %{
10679   predicate(!UseRVV && (uint64_t)n->in(2)->get_long()
10680             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
10681   match(Set dummy (ClearArray cnt base));
10682   effect(USE_KILL base, KILL cr);
10683 
10684   ins_cost(4 * DEFAULT_COST);
10685   format %{ "ClearArray $cnt, $base\t#@clearArray_imm_reg" %}
10686 
10687   ins_encode %{
10688     __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
10689   %}
10690 
10691   ins_pipe(pipe_class_memory);
10692 %}
10693 
10694 instruct string_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt,
10695                         iRegI_R10 result, rFlagsReg cr)
10696 %{
10697   predicate(!UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
10698   match(Set result (StrEquals (Binary str1 str2) cnt));
10699   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
10700 
10701   format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %}
10702   ins_encode %{
10703     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
10704     __ string_equals($str1$$Register, $str2$$Register,
10705                      $result$$Register, $cnt$$Register);
10706   %}
10707   ins_pipe(pipe_class_memory);
10708 %}
10709 
10710 instruct array_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
10711                        iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
10712 %{
10713   predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
10714   match(Set result (AryEq ary1 ary2));
10715   effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);
10716 
10717   format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsB // KILL all" %}
10718   ins_encode %{
10719     __ arrays_equals($ary1$$Register, $ary2$$Register,
10720                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10721                      $result$$Register, 1);
10722   %}
10723   ins_pipe(pipe_class_memory);
10724 %}
10725 
10726 instruct array_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
10727                        iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
10728 %{
10729   predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
10730   match(Set result (AryEq ary1 ary2));
10731   effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);
10732 
10733   format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsC // KILL all" %}
10734   ins_encode %{
10735     __ arrays_equals($ary1$$Register, $ary2$$Register,
10736                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10737                      $result$$Register, 2);
10738   %}
10739   ins_pipe(pipe_class_memory);
10740 %}
10741 
10742 // fast ArraysSupport.vectorizedHashCode
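// Roughly the scalar loop being replaced, in illustrative Java (the initial
// value arrives in $result, e.g. 1 for Arrays.hashCode and 0 for
// String.hashCode; element width and signedness follow $basic_type):
//
//   static int hashCode(int initial, byte[] a) {
//     int h = initial;
//     for (byte b : a) {
//       h = 31 * h + b;
//     }
//     return h;
//   }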
10743 instruct arrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI basic_type,
10744                          iRegLNoSp tmp1, iRegLNoSp tmp2,
10745                          iRegLNoSp tmp3, iRegLNoSp tmp4,
10746                          iRegLNoSp tmp5, iRegLNoSp tmp6, rFlagsReg cr)
10747 %{
10748   match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
10749   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
10750          USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);
10751 
10752   format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
10753   ins_encode %{
10754     __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
10755                        $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
10756                        $tmp4$$Register, $tmp5$$Register, $tmp6$$Register,
10757                        (BasicType)$basic_type$$constant);
10758   %}
10759   ins_pipe(pipe_class_memory);
10760 %}
10761 
10762 // ============================================================================
10763 // Safepoint Instructions
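//
// The poll is a load from the thread-local polling page: when the VM requests
// a safepoint or handshake the page is armed so that the load traps and the
// signal handler hands control to the safepoint machinery.  Illustrative Java
// of where such polls typically end up (loop back edges and returns):
//
//   while (!done) {
//     work();   // C2 normally places a SafePoint poll on this back edge
//   }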
10764 
10765 instruct safePoint(iRegP poll)
10766 %{
10767   match(SafePoint poll);
10768 
10769   ins_cost(2 * LOAD_COST);
10770   format %{
10771     "lwu zr, [$poll]\t# Safepoint: poll for GC, #@safePoint"
10772   %}
10773   ins_encode %{
10774     __ read_polling_page(as_Register($poll$$reg), 0, relocInfo::poll_type);
10775   %}
10776   ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
10777 %}
10778 
10779 // ============================================================================
10780 // This name is KNOWN by the ADLC and cannot be changed.
10781 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this node.
10783 instruct tlsLoadP(javaThread_RegP dst)
10784 %{
10785   match(Set dst (ThreadLocal));
10786 
10787   ins_cost(0);
10788 
10789   format %{ " -- \t// $dst=Thread::current(), empty, #@tlsLoadP" %}
10790 
10791   size(0);
10792 
10793   ins_encode( /*empty*/ );
10794 
10795   ins_pipe(pipe_class_empty);
10796 %}
10797 
10798 // inlined locking and unlocking
10799 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
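//
// Illustrative Java only: a synchronized region compiles to a FastLock at
// monitorenter and a matching FastUnlock at monitorexit, with the value left
// in t1 steering the following Bool/If to the slow-path runtime call when the
// fast path fails:
//
//   void m(Object lock) {
//     synchronized (lock) {   // FastLock(lock, box); slow path -> runtime monitorenter
//       // critical section
//     }                       // FastUnlock(lock, box); slow path -> runtime monitorexit
//   }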
10800 instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
10801                      iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
10802 %{
10803   predicate(LockingMode != LM_LIGHTWEIGHT);
10804   match(Set cr (FastLock object box));
10805   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
10806 
10807   ins_cost(10 * DEFAULT_COST);
10808   format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}
10809 
10810   ins_encode %{
10811     __ fast_lock($object$$Register, $box$$Register,
10812                  $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
10813   %}
10814 
10815   ins_pipe(pipe_serial);
10816 %}
10817 
10818 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
10819 instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2)
10820 %{
10821   predicate(LockingMode != LM_LIGHTWEIGHT);
10822   match(Set cr (FastUnlock object box));
10823   effect(TEMP tmp1, TEMP tmp2);
10824 
10825   ins_cost(10 * DEFAULT_COST);
10826   format %{ "fastunlock $object,$box\t! kills $tmp1, $tmp2, #@cmpFastUnlock" %}
10827 
10828   ins_encode %{
10829     __ fast_unlock($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
10830   %}
10831 
10832   ins_pipe(pipe_serial);
10833 %}
10834 
10835 instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
10836                                 iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
10837 %{
10838   predicate(LockingMode == LM_LIGHTWEIGHT);
10839   match(Set cr (FastLock object box));
10840   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);
10841 
10842   ins_cost(10 * DEFAULT_COST);
10843   format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLockLightweight" %}
10844 
10845   ins_encode %{
10846     __ fast_lock_lightweight($object$$Register, $box$$Register,
10847                              $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
10848   %}
10849 
10850   ins_pipe(pipe_serial);
10851 %}
10852 
10853 instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box,
10854                                   iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
10855 %{
10856   predicate(LockingMode == LM_LIGHTWEIGHT);
10857   match(Set cr (FastUnlock object box));
10858   effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
10859 
10860   ins_cost(10 * DEFAULT_COST);
10861   format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %}
10862 
10863   ins_encode %{
10864     __ fast_unlock_lightweight($object$$Register, $box$$Register,
10865                                $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
10866   %}
10867 
10868   ins_pipe(pipe_serial);
10869 %}
10870 
10871 // Tail Call; Jump from runtime stub to Java code.
10872 // Also known as an 'interprocedural jump'.
10873 // Target of jump will eventually return to caller.
10874 // TailJump below removes the return address.
10875 // Don't use fp for 'jump_target' because a MachEpilogNode has already been
// emitted just above the TailCall, which has reset fp to the caller's state.
10877 instruct TailCalljmpInd(iRegPNoSpNoFp jump_target, inline_cache_RegP method_oop)
10878 %{
10879   match(TailCall jump_target method_oop);
10880 
10881   ins_cost(BRANCH_COST);
10882 
10883   format %{ "jalr $jump_target\t# $method_oop holds method oop, #@TailCalljmpInd." %}
10884 
10885   ins_encode(riscv_enc_tail_call(jump_target));
10886 
10887   ins_pipe(pipe_class_call);
10888 %}
10889 
10890 instruct TailjmpInd(iRegPNoSpNoFp jump_target, iRegP_R10 ex_oop)
10891 %{
10892   match(TailJump jump_target ex_oop);
10893 
10894   ins_cost(ALU_COST + BRANCH_COST);
10895 
10896   format %{ "jalr $jump_target\t# $ex_oop holds exception oop, #@TailjmpInd." %}
10897 
10898   ins_encode(riscv_enc_tail_jmp(jump_target));
10899 
10900   ins_pipe(pipe_class_call);
10901 %}
10902 
10903 // Forward exception.
10904 instruct ForwardExceptionjmp()
10905 %{
10906   match(ForwardException);
10907 
10908   ins_cost(BRANCH_COST);
10909 
10910   format %{ "j forward_exception_stub\t#@ForwardException" %}
10911 
10912   ins_encode %{
10913     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
10914   %}
10915 
10916   ins_pipe(pipe_class_call);
10917 %}
10918 
10919 // Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is set up
10921 // just prior to jumping to this handler. No code emitted.
10922 instruct CreateException(iRegP_R10 ex_oop)
10923 %{
10924   match(Set ex_oop (CreateEx));
10925 
10926   ins_cost(0);
10927   format %{ " -- \t// exception oop; no code emitted, #@CreateException" %}
10928 
10929   size(0);
10930 
10931   ins_encode( /*empty*/ );
10932 
10933   ins_pipe(pipe_class_empty);
10934 %}
10935 
10936 // Rethrow exception: The exception oop will come in the first
10937 // argument position. Then JUMP (not call) to the rethrow stub code.
10938 instruct RethrowException()
10939 %{
10940   match(Rethrow);
10941 
10942   ins_cost(BRANCH_COST);
10943 
10944   format %{ "j rethrow_stub\t#@RethrowException" %}
10945 
10946   ins_encode(riscv_enc_rethrow());
10947 
10948   ins_pipe(pipe_class_call);
10949 %}
10950 
10951 // Return Instruction
10952 // epilog node loads ret address into ra as part of frame pop
10953 instruct Ret()
10954 %{
10955   match(Return);
10956 
10957   ins_cost(BRANCH_COST);
10958   format %{ "ret\t// return register, #@Ret" %}
10959 
10960   ins_encode(riscv_enc_ret());
10961 
10962   ins_pipe(pipe_branch);
10963 %}
10964 
10965 // Die now.
10966 instruct ShouldNotReachHere() %{
10967   match(Halt);
10968 
10969   ins_cost(BRANCH_COST);
10970 
10971   format %{ "#@ShouldNotReachHere" %}
10972 
10973   ins_encode %{
10974     if (is_reachable()) {
10975       const char* str = __ code_string(_halt_reason);
10976       __ stop(str);
10977     }
10978   %}
10979 
10980   ins_pipe(pipe_class_default);
10981 %}
10982 
10983 
10984 //----------PEEPHOLE RULES-----------------------------------------------------
10985 // These must follow all instruction definitions as they use the names
// defined in the instruction definitions.
10987 //
10988 // peepmatch ( root_instr_name [preceding_instruction]* );
10989 //
10990 // peepconstraint %{
10991 // (instruction_number.operand_name relational_op instruction_number.operand_name
10992 //  [, ...] );
10993 // // instruction numbers are zero-based using left to right order in peepmatch
10994 //
10995 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
10996 // // provide an instruction_number.operand_name for each operand that appears
10997 // // in the replacement instruction's match rule
10998 //
10999 // ---------VM FLAGS---------------------------------------------------------
11000 //
11001 // All peephole optimizations can be turned off using -XX:-OptoPeephole
11002 //
11003 // Each peephole rule is given an identifying number starting with zero and
11004 // increasing by one in the order seen by the parser.  An individual peephole
11005 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
11006 // on the command-line.
11007 //
11008 // ---------CURRENT LIMITATIONS----------------------------------------------
11009 //
11010 // Only match adjacent instructions in same basic block
11011 // Only equality constraints
11012 // Only constraints between operands, not (0.dest_reg == RAX_enc)
11013 // Only one replacement instruction
11014 //
11015 //----------SMARTSPILL RULES---------------------------------------------------
11016 // These must follow all instruction definitions as they use the names
// defined in the instruction definitions.
11018 
11019 // Local Variables:
11020 // mode: c++
11021 // End: