1 //
2 // Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
4 // Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
5 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 //
7 // This code is free software; you can redistribute it and/or modify it
8 // under the terms of the GNU General Public License version 2 only, as
9 // published by the Free Software Foundation.
10 //
11 // This code is distributed in the hope that it will be useful, but WITHOUT
12 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 // version 2 for more details (a copy is included in the LICENSE file that
15 // accompanied this code).
16 //
17 // You should have received a copy of the GNU General Public License version
18 // 2 along with this work; if not, write to the Free Software Foundation,
19 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 //
21 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 // or visit www.oracle.com if you need additional information or have any
23 // questions.
24 //
25 //
26
27 // RISCV Architecture Description File
28
29 //----------REGISTER DEFINITION BLOCK------------------------------------------
30 // This information is used by the matcher and the register allocator to
31 // describe individual registers and classes of registers within the target
32 // architecture.
33
34 register %{
35 //----------Architecture Description Register Definitions----------------------
36 // General Registers
37 // "reg_def" name ( register save type, C convention save type,
38 // ideal register type, encoding );
39 // Register Save Types:
40 //
41 // NS = No-Save: The register allocator assumes that these registers
42 // can be used without saving upon entry to the method, &
43 // that they do not need to be saved at call sites.
44 //
45 // SOC = Save-On-Call: The register allocator assumes that these registers
46 // can be used without saving upon entry to the method,
47 // but that they must be saved at call sites.
48 //
49 // SOE = Save-On-Entry: The register allocator assumes that these registers
50 // must be saved before using them upon entry to the
51 // method, but they do not need to be saved at call
52 // sites.
53 //
54 // AS = Always-Save: The register allocator assumes that these registers
55 // must be saved before using them upon entry to the
56 // method, & that they must be saved at call sites.
57 //
58 // Ideal Register Type is used to determine how to save & restore a
59 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
60 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
61 //
62 // The encoding number is the actual bit-pattern placed into the opcodes.
63
64 // We must define the 64 bit int registers in two 32 bit halves, the
65 // real lower register and a virtual upper half register. upper halves
66 // are used by the register allocator but are not actually supplied as
67 // operands to memory ops.
68 //
69 // follow the C1 compiler in making registers
70 //
71 // x7, x9-x17, x27-x31 volatile (caller save)
72 // x0-x4, x8, x23 system (no save, no allocate)
73 // x5-x6 non-allocatable (so we can use them as temporary regs)
74
75 //
76 // as regards Java usage. we don't use any callee save registers
77 // because this makes it difficult to de-optimise a frame (see comment
78 // in x86 implementation of Deoptimization::unwind_callee_save_values)
79 //
80
81 // General Registers
82
83 reg_def R0 ( NS, NS, Op_RegI, 0, x0->as_VMReg() ); // zr
84 reg_def R0_H ( NS, NS, Op_RegI, 0, x0->as_VMReg()->next() );
85 reg_def R1 ( NS, SOC, Op_RegI, 1, x1->as_VMReg() ); // ra
86 reg_def R1_H ( NS, SOC, Op_RegI, 1, x1->as_VMReg()->next() );
87 reg_def R2 ( NS, NS, Op_RegI, 2, x2->as_VMReg() ); // sp
88 reg_def R2_H ( NS, NS, Op_RegI, 2, x2->as_VMReg()->next() );
89 reg_def R3 ( NS, NS, Op_RegI, 3, x3->as_VMReg() ); // gp
90 reg_def R3_H ( NS, NS, Op_RegI, 3, x3->as_VMReg()->next() );
91 reg_def R4 ( NS, NS, Op_RegI, 4, x4->as_VMReg() ); // tp
92 reg_def R4_H ( NS, NS, Op_RegI, 4, x4->as_VMReg()->next() );
93 reg_def R7 ( SOC, SOC, Op_RegI, 7, x7->as_VMReg() ); // t2
94 reg_def R7_H ( SOC, SOC, Op_RegI, 7, x7->as_VMReg()->next() );
95 reg_def R8 ( NS, SOE, Op_RegI, 8, x8->as_VMReg() ); // fp
96 reg_def R8_H ( NS, SOE, Op_RegI, 8, x8->as_VMReg()->next() );
97 reg_def R9 ( SOC, SOE, Op_RegI, 9, x9->as_VMReg() ); // s1
98 reg_def R9_H ( SOC, SOE, Op_RegI, 9, x9->as_VMReg()->next() );
99 reg_def R10 ( SOC, SOC, Op_RegI, 10, x10->as_VMReg() ); // a0
100 reg_def R10_H ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()->next());
101 reg_def R11 ( SOC, SOC, Op_RegI, 11, x11->as_VMReg() ); // a1
102 reg_def R11_H ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()->next());
103 reg_def R12 ( SOC, SOC, Op_RegI, 12, x12->as_VMReg() ); // a2
104 reg_def R12_H ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()->next());
105 reg_def R13 ( SOC, SOC, Op_RegI, 13, x13->as_VMReg() ); // a3
106 reg_def R13_H ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()->next());
107 reg_def R14 ( SOC, SOC, Op_RegI, 14, x14->as_VMReg() ); // a4
108 reg_def R14_H ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()->next());
109 reg_def R15 ( SOC, SOC, Op_RegI, 15, x15->as_VMReg() ); // a5
110 reg_def R15_H ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()->next());
111 reg_def R16 ( SOC, SOC, Op_RegI, 16, x16->as_VMReg() ); // a6
112 reg_def R16_H ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()->next());
113 reg_def R17 ( SOC, SOC, Op_RegI, 17, x17->as_VMReg() ); // a7
114 reg_def R17_H ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()->next());
115 reg_def R18 ( SOC, SOE, Op_RegI, 18, x18->as_VMReg() ); // s2
116 reg_def R18_H ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()->next());
117 reg_def R19 ( SOC, SOE, Op_RegI, 19, x19->as_VMReg() ); // s3
118 reg_def R19_H ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()->next());
119 reg_def R20 ( SOC, SOE, Op_RegI, 20, x20->as_VMReg() ); // caller esp
120 reg_def R20_H ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()->next());
121 reg_def R21 ( SOC, SOE, Op_RegI, 21, x21->as_VMReg() ); // s5
122 reg_def R21_H ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()->next());
123 reg_def R22 ( SOC, SOE, Op_RegI, 22, x22->as_VMReg() ); // s6
124 reg_def R22_H ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()->next());
125 reg_def R23 ( NS, SOE, Op_RegI, 23, x23->as_VMReg() ); // java thread
126 reg_def R23_H ( NS, SOE, Op_RegI, 23, x23->as_VMReg()->next());
127 reg_def R24 ( SOC, SOE, Op_RegI, 24, x24->as_VMReg() ); // s8
128 reg_def R24_H ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()->next());
129 reg_def R25 ( SOC, SOE, Op_RegI, 25, x25->as_VMReg() ); // s9
130 reg_def R25_H ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()->next());
131 reg_def R26 ( SOC, SOE, Op_RegI, 26, x26->as_VMReg() ); // s10
132 reg_def R26_H ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()->next());
133 reg_def R27 ( SOC, SOE, Op_RegI, 27, x27->as_VMReg() ); // heapbase
134 reg_def R27_H ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()->next());
135 reg_def R28 ( SOC, SOC, Op_RegI, 28, x28->as_VMReg() ); // t3
136 reg_def R28_H ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()->next());
137 reg_def R29 ( SOC, SOC, Op_RegI, 29, x29->as_VMReg() ); // t4
138 reg_def R29_H ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()->next());
139 reg_def R30 ( SOC, SOC, Op_RegI, 30, x30->as_VMReg() ); // t5
140 reg_def R30_H ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()->next());
141 reg_def R31 ( SOC, SOC, Op_RegI, 31, x31->as_VMReg() ); // t6
142 reg_def R31_H ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()->next());
143
144 // ----------------------------
145 // Float/Double Registers
146 // ----------------------------
147
148 // Double Registers
149
150 // The rules of ADL require that double registers be defined in pairs.
151 // Each pair must be two 32-bit values, but not necessarily a pair of
152 // single float registers. In each pair, ADLC-assigned register numbers
153 // must be adjacent, with the lower number even. Finally, when the
154 // CPU stores such a register pair to memory, the word associated with
155 // the lower ADLC-assigned number must be stored to the lower address.
156
157 // RISCV has 32 floating-point registers. Each can store a single
158 // or double precision floating-point value.
159
160 // For Java use, float registers f0-f31 are always save-on-call, whereas
161 // the platform ABI treats f8-f9 and f18-f27 as callee-save. Other
162 // float registers are SOC as per the platform spec.
163
164 reg_def F0 ( SOC, SOC, Op_RegF, 0, f0->as_VMReg() );
165 reg_def F0_H ( SOC, SOC, Op_RegF, 0, f0->as_VMReg()->next() );
166 reg_def F1 ( SOC, SOC, Op_RegF, 1, f1->as_VMReg() );
167 reg_def F1_H ( SOC, SOC, Op_RegF, 1, f1->as_VMReg()->next() );
168 reg_def F2 ( SOC, SOC, Op_RegF, 2, f2->as_VMReg() );
169 reg_def F2_H ( SOC, SOC, Op_RegF, 2, f2->as_VMReg()->next() );
170 reg_def F3 ( SOC, SOC, Op_RegF, 3, f3->as_VMReg() );
171 reg_def F3_H ( SOC, SOC, Op_RegF, 3, f3->as_VMReg()->next() );
172 reg_def F4 ( SOC, SOC, Op_RegF, 4, f4->as_VMReg() );
173 reg_def F4_H ( SOC, SOC, Op_RegF, 4, f4->as_VMReg()->next() );
174 reg_def F5 ( SOC, SOC, Op_RegF, 5, f5->as_VMReg() );
175 reg_def F5_H ( SOC, SOC, Op_RegF, 5, f5->as_VMReg()->next() );
176 reg_def F6 ( SOC, SOC, Op_RegF, 6, f6->as_VMReg() );
177 reg_def F6_H ( SOC, SOC, Op_RegF, 6, f6->as_VMReg()->next() );
178 reg_def F7 ( SOC, SOC, Op_RegF, 7, f7->as_VMReg() );
179 reg_def F7_H ( SOC, SOC, Op_RegF, 7, f7->as_VMReg()->next() );
180 reg_def F8 ( SOC, SOE, Op_RegF, 8, f8->as_VMReg() );
181 reg_def F8_H ( SOC, SOE, Op_RegF, 8, f8->as_VMReg()->next() );
182 reg_def F9 ( SOC, SOE, Op_RegF, 9, f9->as_VMReg() );
183 reg_def F9_H ( SOC, SOE, Op_RegF, 9, f9->as_VMReg()->next() );
184 reg_def F10 ( SOC, SOC, Op_RegF, 10, f10->as_VMReg() );
185 reg_def F10_H ( SOC, SOC, Op_RegF, 10, f10->as_VMReg()->next() );
186 reg_def F11 ( SOC, SOC, Op_RegF, 11, f11->as_VMReg() );
187 reg_def F11_H ( SOC, SOC, Op_RegF, 11, f11->as_VMReg()->next() );
188 reg_def F12 ( SOC, SOC, Op_RegF, 12, f12->as_VMReg() );
189 reg_def F12_H ( SOC, SOC, Op_RegF, 12, f12->as_VMReg()->next() );
190 reg_def F13 ( SOC, SOC, Op_RegF, 13, f13->as_VMReg() );
191 reg_def F13_H ( SOC, SOC, Op_RegF, 13, f13->as_VMReg()->next() );
192 reg_def F14 ( SOC, SOC, Op_RegF, 14, f14->as_VMReg() );
193 reg_def F14_H ( SOC, SOC, Op_RegF, 14, f14->as_VMReg()->next() );
194 reg_def F15 ( SOC, SOC, Op_RegF, 15, f15->as_VMReg() );
195 reg_def F15_H ( SOC, SOC, Op_RegF, 15, f15->as_VMReg()->next() );
196 reg_def F16 ( SOC, SOC, Op_RegF, 16, f16->as_VMReg() );
197 reg_def F16_H ( SOC, SOC, Op_RegF, 16, f16->as_VMReg()->next() );
198 reg_def F17 ( SOC, SOC, Op_RegF, 17, f17->as_VMReg() );
199 reg_def F17_H ( SOC, SOC, Op_RegF, 17, f17->as_VMReg()->next() );
200 reg_def F18 ( SOC, SOE, Op_RegF, 18, f18->as_VMReg() );
201 reg_def F18_H ( SOC, SOE, Op_RegF, 18, f18->as_VMReg()->next() );
202 reg_def F19 ( SOC, SOE, Op_RegF, 19, f19->as_VMReg() );
203 reg_def F19_H ( SOC, SOE, Op_RegF, 19, f19->as_VMReg()->next() );
204 reg_def F20 ( SOC, SOE, Op_RegF, 20, f20->as_VMReg() );
205 reg_def F20_H ( SOC, SOE, Op_RegF, 20, f20->as_VMReg()->next() );
206 reg_def F21 ( SOC, SOE, Op_RegF, 21, f21->as_VMReg() );
207 reg_def F21_H ( SOC, SOE, Op_RegF, 21, f21->as_VMReg()->next() );
208 reg_def F22 ( SOC, SOE, Op_RegF, 22, f22->as_VMReg() );
209 reg_def F22_H ( SOC, SOE, Op_RegF, 22, f22->as_VMReg()->next() );
210 reg_def F23 ( SOC, SOE, Op_RegF, 23, f23->as_VMReg() );
211 reg_def F23_H ( SOC, SOE, Op_RegF, 23, f23->as_VMReg()->next() );
212 reg_def F24 ( SOC, SOE, Op_RegF, 24, f24->as_VMReg() );
213 reg_def F24_H ( SOC, SOE, Op_RegF, 24, f24->as_VMReg()->next() );
214 reg_def F25 ( SOC, SOE, Op_RegF, 25, f25->as_VMReg() );
215 reg_def F25_H ( SOC, SOE, Op_RegF, 25, f25->as_VMReg()->next() );
216 reg_def F26 ( SOC, SOE, Op_RegF, 26, f26->as_VMReg() );
217 reg_def F26_H ( SOC, SOE, Op_RegF, 26, f26->as_VMReg()->next() );
218 reg_def F27 ( SOC, SOE, Op_RegF, 27, f27->as_VMReg() );
219 reg_def F27_H ( SOC, SOE, Op_RegF, 27, f27->as_VMReg()->next() );
220 reg_def F28 ( SOC, SOC, Op_RegF, 28, f28->as_VMReg() );
221 reg_def F28_H ( SOC, SOC, Op_RegF, 28, f28->as_VMReg()->next() );
222 reg_def F29 ( SOC, SOC, Op_RegF, 29, f29->as_VMReg() );
223 reg_def F29_H ( SOC, SOC, Op_RegF, 29, f29->as_VMReg()->next() );
224 reg_def F30 ( SOC, SOC, Op_RegF, 30, f30->as_VMReg() );
225 reg_def F30_H ( SOC, SOC, Op_RegF, 30, f30->as_VMReg()->next() );
226 reg_def F31 ( SOC, SOC, Op_RegF, 31, f31->as_VMReg() );
227 reg_def F31_H ( SOC, SOC, Op_RegF, 31, f31->as_VMReg()->next() );
228
229 // ----------------------------
230 // Vector Registers
231 // ----------------------------
232
233 // For RVV vector registers, we simply extend vector register size to 4
234 // 'logical' slots. This is nominally 128 bits but it actually covers
235 // all possible 'physical' RVV vector register lengths from 128 ~ 1024
236 // bits. The 'physical' RVV vector register length is detected during
237 // startup, so the register allocator is able to identify the correct
238 // number of bytes needed for an RVV spill/unspill.
239
240 reg_def V0 ( SOC, SOC, Op_VecA, 0, v0->as_VMReg() );
241 reg_def V0_H ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next() );
242 reg_def V0_J ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next(2) );
243 reg_def V0_K ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next(3) );
244
245 reg_def V1 ( SOC, SOC, Op_VecA, 1, v1->as_VMReg() );
246 reg_def V1_H ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next() );
247 reg_def V1_J ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next(2) );
248 reg_def V1_K ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next(3) );
249
250 reg_def V2 ( SOC, SOC, Op_VecA, 2, v2->as_VMReg() );
251 reg_def V2_H ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next() );
252 reg_def V2_J ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next(2) );
253 reg_def V2_K ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next(3) );
254
255 reg_def V3 ( SOC, SOC, Op_VecA, 3, v3->as_VMReg() );
256 reg_def V3_H ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next() );
257 reg_def V3_J ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next(2) );
258 reg_def V3_K ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next(3) );
259
260 reg_def V4 ( SOC, SOC, Op_VecA, 4, v4->as_VMReg() );
261 reg_def V4_H ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next() );
262 reg_def V4_J ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next(2) );
263 reg_def V4_K ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next(3) );
264
265 reg_def V5 ( SOC, SOC, Op_VecA, 5, v5->as_VMReg() );
266 reg_def V5_H ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next() );
267 reg_def V5_J ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next(2) );
268 reg_def V5_K ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next(3) );
269
270 reg_def V6 ( SOC, SOC, Op_VecA, 6, v6->as_VMReg() );
271 reg_def V6_H ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next() );
272 reg_def V6_J ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next(2) );
273 reg_def V6_K ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next(3) );
274
275 reg_def V7 ( SOC, SOC, Op_VecA, 7, v7->as_VMReg() );
276 reg_def V7_H ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next() );
277 reg_def V7_J ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next(2) );
278 reg_def V7_K ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next(3) );
279
280 reg_def V8 ( SOC, SOC, Op_VecA, 8, v8->as_VMReg() );
281 reg_def V8_H ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next() );
282 reg_def V8_J ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next(2) );
283 reg_def V8_K ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next(3) );
284
285 reg_def V9 ( SOC, SOC, Op_VecA, 9, v9->as_VMReg() );
286 reg_def V9_H ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next() );
287 reg_def V9_J ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next(2) );
288 reg_def V9_K ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next(3) );
289
290 reg_def V10 ( SOC, SOC, Op_VecA, 10, v10->as_VMReg() );
291 reg_def V10_H ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next() );
292 reg_def V10_J ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(2) );
293 reg_def V10_K ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(3) );
294
295 reg_def V11 ( SOC, SOC, Op_VecA, 11, v11->as_VMReg() );
296 reg_def V11_H ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next() );
297 reg_def V11_J ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(2) );
298 reg_def V11_K ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(3) );
299
300 reg_def V12 ( SOC, SOC, Op_VecA, 12, v12->as_VMReg() );
301 reg_def V12_H ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next() );
302 reg_def V12_J ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(2) );
303 reg_def V12_K ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(3) );
304
305 reg_def V13 ( SOC, SOC, Op_VecA, 13, v13->as_VMReg() );
306 reg_def V13_H ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next() );
307 reg_def V13_J ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(2) );
308 reg_def V13_K ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(3) );
309
310 reg_def V14 ( SOC, SOC, Op_VecA, 14, v14->as_VMReg() );
311 reg_def V14_H ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next() );
312 reg_def V14_J ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(2) );
313 reg_def V14_K ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(3) );
314
315 reg_def V15 ( SOC, SOC, Op_VecA, 15, v15->as_VMReg() );
316 reg_def V15_H ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next() );
317 reg_def V15_J ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(2) );
318 reg_def V15_K ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(3) );
319
320 reg_def V16 ( SOC, SOC, Op_VecA, 16, v16->as_VMReg() );
321 reg_def V16_H ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next() );
322 reg_def V16_J ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(2) );
323 reg_def V16_K ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(3) );
324
325 reg_def V17 ( SOC, SOC, Op_VecA, 17, v17->as_VMReg() );
326 reg_def V17_H ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next() );
327 reg_def V17_J ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(2) );
328 reg_def V17_K ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(3) );
329
330 reg_def V18 ( SOC, SOC, Op_VecA, 18, v18->as_VMReg() );
331 reg_def V18_H ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next() );
332 reg_def V18_J ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(2) );
333 reg_def V18_K ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(3) );
334
335 reg_def V19 ( SOC, SOC, Op_VecA, 19, v19->as_VMReg() );
336 reg_def V19_H ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next() );
337 reg_def V19_J ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(2) );
338 reg_def V19_K ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(3) );
339
340 reg_def V20 ( SOC, SOC, Op_VecA, 20, v20->as_VMReg() );
341 reg_def V20_H ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next() );
342 reg_def V20_J ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(2) );
343 reg_def V20_K ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(3) );
344
345 reg_def V21 ( SOC, SOC, Op_VecA, 21, v21->as_VMReg() );
346 reg_def V21_H ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next() );
347 reg_def V21_J ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(2) );
348 reg_def V21_K ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(3) );
349
350 reg_def V22 ( SOC, SOC, Op_VecA, 22, v22->as_VMReg() );
351 reg_def V22_H ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next() );
352 reg_def V22_J ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(2) );
353 reg_def V22_K ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(3) );
354
355 reg_def V23 ( SOC, SOC, Op_VecA, 23, v23->as_VMReg() );
356 reg_def V23_H ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next() );
357 reg_def V23_J ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(2) );
358 reg_def V23_K ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(3) );
359
360 reg_def V24 ( SOC, SOC, Op_VecA, 24, v24->as_VMReg() );
361 reg_def V24_H ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next() );
362 reg_def V24_J ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(2) );
363 reg_def V24_K ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(3) );
364
365 reg_def V25 ( SOC, SOC, Op_VecA, 25, v25->as_VMReg() );
366 reg_def V25_H ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next() );
367 reg_def V25_J ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(2) );
368 reg_def V25_K ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(3) );
369
370 reg_def V26 ( SOC, SOC, Op_VecA, 26, v26->as_VMReg() );
371 reg_def V26_H ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next() );
372 reg_def V26_J ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(2) );
373 reg_def V26_K ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(3) );
374
375 reg_def V27 ( SOC, SOC, Op_VecA, 27, v27->as_VMReg() );
376 reg_def V27_H ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next() );
377 reg_def V27_J ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(2) );
378 reg_def V27_K ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(3) );
379
380 reg_def V28 ( SOC, SOC, Op_VecA, 28, v28->as_VMReg() );
381 reg_def V28_H ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next() );
382 reg_def V28_J ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(2) );
383 reg_def V28_K ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(3) );
384
385 reg_def V29 ( SOC, SOC, Op_VecA, 29, v29->as_VMReg() );
386 reg_def V29_H ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next() );
387 reg_def V29_J ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(2) );
388 reg_def V29_K ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(3) );
389
390 reg_def V30 ( SOC, SOC, Op_VecA, 30, v30->as_VMReg() );
391 reg_def V30_H ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next() );
392 reg_def V30_J ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(2) );
393 reg_def V30_K ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(3) );
394
395 reg_def V31 ( SOC, SOC, Op_VecA, 31, v31->as_VMReg() );
396 reg_def V31_H ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next() );
397 reg_def V31_J ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(2) );
398 reg_def V31_K ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(3) );
399
400 // ----------------------------
401 // Special Registers
402 // ----------------------------
403
404 // On riscv, the physical flag register is missing, so we use t1 instead,
405 // to bridge the RegFlag semantics in share/opto
406
407 reg_def RFLAGS (SOC, SOC, Op_RegFlags, 6, x6->as_VMReg() );
408
409 // Specify priority of register selection within phases of register
410 // allocation. Highest priority is first. A useful heuristic is to
411 // give registers a low priority when they are required by machine
412 // instructions, like EAX and EDX on I486, and choose no-save registers
413 // before save-on-call, & save-on-call before save-on-entry. Registers
414 // which participate in fixed calling sequences should come last.
415 // Registers which are used as pairs must fall on an even boundary.
416
417 alloc_class chunk0(
418 // volatiles
419 R7, R7_H,
420 R28, R28_H,
421 R29, R29_H,
422 R30, R30_H,
423 R31, R31_H,
424
425 // arg registers
426 R10, R10_H,
427 R11, R11_H,
428 R12, R12_H,
429 R13, R13_H,
430 R14, R14_H,
431 R15, R15_H,
432 R16, R16_H,
433 R17, R17_H,
434
435 // non-volatiles
436 R9, R9_H,
437 R18, R18_H,
438 R19, R19_H,
439 R20, R20_H,
440 R21, R21_H,
441 R22, R22_H,
442 R24, R24_H,
443 R25, R25_H,
444 R26, R26_H,
445
446 // non-allocatable registers
447 R23, R23_H, // java thread
448 R27, R27_H, // heapbase
449 R4, R4_H, // thread
450 R8, R8_H, // fp
451 R0, R0_H, // zero
452 R1, R1_H, // ra
453 R2, R2_H, // sp
454 R3, R3_H, // gp
455 );
456
457 alloc_class chunk1(
458
459 // no save
460 F0, F0_H,
461 F1, F1_H,
462 F2, F2_H,
463 F3, F3_H,
464 F4, F4_H,
465 F5, F5_H,
466 F6, F6_H,
467 F7, F7_H,
468 F28, F28_H,
469 F29, F29_H,
470 F30, F30_H,
471 F31, F31_H,
472
473 // arg registers
474 F10, F10_H,
475 F11, F11_H,
476 F12, F12_H,
477 F13, F13_H,
478 F14, F14_H,
479 F15, F15_H,
480 F16, F16_H,
481 F17, F17_H,
482
483 // non-volatiles
484 F8, F8_H,
485 F9, F9_H,
486 F18, F18_H,
487 F19, F19_H,
488 F20, F20_H,
489 F21, F21_H,
490 F22, F22_H,
491 F23, F23_H,
492 F24, F24_H,
493 F25, F25_H,
494 F26, F26_H,
495 F27, F27_H,
496 );
497
498 alloc_class chunk2(
499 V0, V0_H, V0_J, V0_K,
500 V1, V1_H, V1_J, V1_K,
501 V2, V2_H, V2_J, V2_K,
502 V3, V3_H, V3_J, V3_K,
503 V4, V4_H, V4_J, V4_K,
504 V5, V5_H, V5_J, V5_K,
505 V6, V6_H, V6_J, V6_K,
506 V7, V7_H, V7_J, V7_K,
507 V8, V8_H, V8_J, V8_K,
508 V9, V9_H, V9_J, V9_K,
509 V10, V10_H, V10_J, V10_K,
510 V11, V11_H, V11_J, V11_K,
511 V12, V12_H, V12_J, V12_K,
512 V13, V13_H, V13_J, V13_K,
513 V14, V14_H, V14_J, V14_K,
514 V15, V15_H, V15_J, V15_K,
515 V16, V16_H, V16_J, V16_K,
516 V17, V17_H, V17_J, V17_K,
517 V18, V18_H, V18_J, V18_K,
518 V19, V19_H, V19_J, V19_K,
519 V20, V20_H, V20_J, V20_K,
520 V21, V21_H, V21_J, V21_K,
521 V22, V22_H, V22_J, V22_K,
522 V23, V23_H, V23_J, V23_K,
523 V24, V24_H, V24_J, V24_K,
524 V25, V25_H, V25_J, V25_K,
525 V26, V26_H, V26_J, V26_K,
526 V27, V27_H, V27_J, V27_K,
527 V28, V28_H, V28_J, V28_K,
528 V29, V29_H, V29_J, V29_K,
529 V30, V30_H, V30_J, V30_K,
530 V31, V31_H, V31_J, V31_K,
531 );
532
533 alloc_class chunk3(RFLAGS);
534
535 //----------Architecture Description Register Classes--------------------------
536 // Several register classes are automatically defined based upon information in
537 // this architecture description.
538 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
539 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
540 //
541
542 // Class for all 32 bit general purpose registers
543 reg_class all_reg32(
544 R0,
545 R1,
546 R2,
547 R3,
548 R4,
549 R7,
550 R8,
551 R9,
552 R10,
553 R11,
554 R12,
555 R13,
556 R14,
557 R15,
558 R16,
559 R17,
560 R18,
561 R19,
562 R20,
563 R21,
564 R22,
565 R23,
566 R24,
567 R25,
568 R26,
569 R27,
570 R28,
571 R29,
572 R30,
573 R31
574 );
575
576 // Class for any 32 bit integer registers (excluding zr)
577 reg_class any_reg32 %{
578 return _ANY_REG32_mask;
579 %}
580
581 // Singleton class for R10 int register
582 reg_class int_r10_reg(R10);
583
584 // Singleton class for R12 int register
585 reg_class int_r12_reg(R12);
586
587 // Singleton class for R13 int register
588 reg_class int_r13_reg(R13);
589
590 // Singleton class for R14 int register
591 reg_class int_r14_reg(R14);
592
593 // Class for all long integer registers
594 reg_class all_reg(
595 R0, R0_H,
596 R1, R1_H,
597 R2, R2_H,
598 R3, R3_H,
599 R4, R4_H,
600 R7, R7_H,
601 R8, R8_H,
602 R9, R9_H,
603 R10, R10_H,
604 R11, R11_H,
605 R12, R12_H,
606 R13, R13_H,
607 R14, R14_H,
608 R15, R15_H,
609 R16, R16_H,
610 R17, R17_H,
611 R18, R18_H,
612 R19, R19_H,
613 R20, R20_H,
614 R21, R21_H,
615 R22, R22_H,
616 R23, R23_H,
617 R24, R24_H,
618 R25, R25_H,
619 R26, R26_H,
620 R27, R27_H,
621 R28, R28_H,
622 R29, R29_H,
623 R30, R30_H,
624 R31, R31_H
625 );
626
627 // Class for all long integer registers (excluding zr)
628 reg_class any_reg %{
629 return _ANY_REG_mask;
630 %}
631
632 // Class for non-allocatable 32 bit registers
633 reg_class non_allocatable_reg32(
634 R0, // zr
635 R1, // ra
636 R2, // sp
637 R3, // gp
638 R4, // tp
639 R23 // java thread
640 );
641
642 // Class for non-allocatable 64 bit registers
643 reg_class non_allocatable_reg(
644 R0, R0_H, // zr
645 R1, R1_H, // ra
646 R2, R2_H, // sp
647 R3, R3_H, // gp
648 R4, R4_H, // tp
649 R23, R23_H // java thread
650 );
651
652 // Class for all non-special integer registers
653 reg_class no_special_reg32 %{
654 return _NO_SPECIAL_REG32_mask;
655 %}
656
657 // Class for all non-special long integer registers
658 reg_class no_special_reg %{
659 return _NO_SPECIAL_REG_mask;
660 %}
661
662 reg_class ptr_reg %{
663 return _PTR_REG_mask;
664 %}
665
666 // Class for all non_special pointer registers
667 reg_class no_special_ptr_reg %{
668 return _NO_SPECIAL_PTR_REG_mask;
669 %}
670
671 // Class for all non_special pointer registers (excluding fp)
672 reg_class no_special_no_fp_ptr_reg %{
673 return _NO_SPECIAL_NO_FP_PTR_REG_mask;
674 %}
675
676 // Class for 64 bit register r10
677 reg_class r10_reg(
678 R10, R10_H
679 );
680
681 // Class for 64 bit register r11
682 reg_class r11_reg(
683 R11, R11_H
684 );
685
686 // Class for 64 bit register r12
687 reg_class r12_reg(
688 R12, R12_H
689 );
690
691 // Class for 64 bit register r13
692 reg_class r13_reg(
693 R13, R13_H
694 );
695
696 // Class for 64 bit register r14
697 reg_class r14_reg(
698 R14, R14_H
699 );
700
701 // Class for 64 bit register r15
702 reg_class r15_reg(
703 R15, R15_H
704 );
705
706 // Class for 64 bit register r16
707 reg_class r16_reg(
708 R16, R16_H
709 );
710
711 // Class for method register
712 reg_class method_reg(
713 R31, R31_H
714 );
715
716 // Class for java thread register
717 reg_class java_thread_reg(
718 R23, R23_H
719 );
720
721 reg_class r28_reg(
722 R28, R28_H
723 );
724
725 reg_class r29_reg(
726 R29, R29_H
727 );
728
729 reg_class r30_reg(
730 R30, R30_H
731 );
732
733 reg_class r31_reg(
734 R31, R31_H
735 );
736
737 // Class for zero register
738 reg_class zr_reg(
739 R0, R0_H
740 );
741
742 // Class for thread register
743 reg_class thread_reg(
744 R4, R4_H
745 );
746
747 // Class for frame pointer register
748 reg_class fp_reg(
749 R8, R8_H
750 );
751
752 // Class for link register
753 reg_class ra_reg(
754 R1, R1_H
755 );
756
757 // Class for long sp register
758 reg_class sp_reg(
759 R2, R2_H
760 );
761
762 // Class for all float registers
763 reg_class float_reg(
764 F0,
765 F1,
766 F2,
767 F3,
768 F4,
769 F5,
770 F6,
771 F7,
772 F8,
773 F9,
774 F10,
775 F11,
776 F12,
777 F13,
778 F14,
779 F15,
780 F16,
781 F17,
782 F18,
783 F19,
784 F20,
785 F21,
786 F22,
787 F23,
788 F24,
789 F25,
790 F26,
791 F27,
792 F28,
793 F29,
794 F30,
795 F31
796 );
797
798 // Double precision float registers have virtual `high halves' that
799 // are needed by the allocator.
800 // Class for all double registers
801 reg_class double_reg(
802 F0, F0_H,
803 F1, F1_H,
804 F2, F2_H,
805 F3, F3_H,
806 F4, F4_H,
807 F5, F5_H,
808 F6, F6_H,
809 F7, F7_H,
810 F8, F8_H,
811 F9, F9_H,
812 F10, F10_H,
813 F11, F11_H,
814 F12, F12_H,
815 F13, F13_H,
816 F14, F14_H,
817 F15, F15_H,
818 F16, F16_H,
819 F17, F17_H,
820 F18, F18_H,
821 F19, F19_H,
822 F20, F20_H,
823 F21, F21_H,
824 F22, F22_H,
825 F23, F23_H,
826 F24, F24_H,
827 F25, F25_H,
828 F26, F26_H,
829 F27, F27_H,
830 F28, F28_H,
831 F29, F29_H,
832 F30, F30_H,
833 F31, F31_H
834 );
835
836 // Class for RVV vector registers
837 // Note: v0, v30 and v31 are used as mask registers.
838 reg_class vectora_reg(
839 V1, V1_H, V1_J, V1_K,
840 V2, V2_H, V2_J, V2_K,
841 V3, V3_H, V3_J, V3_K,
842 V4, V4_H, V4_J, V4_K,
843 V5, V5_H, V5_J, V5_K,
844 V6, V6_H, V6_J, V6_K,
845 V7, V7_H, V7_J, V7_K,
846 V8, V8_H, V8_J, V8_K,
847 V9, V9_H, V9_J, V9_K,
848 V10, V10_H, V10_J, V10_K,
849 V11, V11_H, V11_J, V11_K,
850 V12, V12_H, V12_J, V12_K,
851 V13, V13_H, V13_J, V13_K,
852 V14, V14_H, V14_J, V14_K,
853 V15, V15_H, V15_J, V15_K,
854 V16, V16_H, V16_J, V16_K,
855 V17, V17_H, V17_J, V17_K,
856 V18, V18_H, V18_J, V18_K,
857 V19, V19_H, V19_J, V19_K,
858 V20, V20_H, V20_J, V20_K,
859 V21, V21_H, V21_J, V21_K,
860 V22, V22_H, V22_J, V22_K,
861 V23, V23_H, V23_J, V23_K,
862 V24, V24_H, V24_J, V24_K,
863 V25, V25_H, V25_J, V25_K,
864 V26, V26_H, V26_J, V26_K,
865 V27, V27_H, V27_J, V27_K,
866 V28, V28_H, V28_J, V28_K,
867 V29, V29_H, V29_J, V29_K
868 );
869
// Single-register classes below are used by instruct patterns that pin an
// operand to one specific physical register (e.g. ABI-fixed temps).

// Class for 64 bit register f0
reg_class f0_reg(
  F0, F0_H
);

// Class for 64 bit register f1
reg_class f1_reg(
  F1, F1_H
);

// Class for 64 bit register f2
reg_class f2_reg(
  F2, F2_H
);

// Class for 64 bit register f3
reg_class f3_reg(
  F3, F3_H
);

// class for vector register v1
reg_class v1_reg(
  V1, V1_H, V1_J, V1_K
);

// class for vector register v2
reg_class v2_reg(
  V2, V2_H, V2_J, V2_K
);

// class for vector register v3
reg_class v3_reg(
  V3, V3_H, V3_J, V3_K
);

// class for vector register v4
reg_class v4_reg(
  V4, V4_H, V4_J, V4_K
);

// class for vector register v5
reg_class v5_reg(
  V5, V5_H, V5_J, V5_K
);

// class for vector register v6
reg_class v6_reg(
  V6, V6_H, V6_J, V6_K
);

// class for vector register v7
reg_class v7_reg(
  V7, V7_H, V7_J, V7_K
);

// class for vector register v8
reg_class v8_reg(
  V8, V8_H, V8_J, V8_K
);

// class for vector register v9
reg_class v9_reg(
  V9, V9_H, V9_J, V9_K
);

// class for vector register v10
reg_class v10_reg(
  V10, V10_H, V10_J, V10_K
);

// class for vector register v11
reg_class v11_reg(
  V11, V11_H, V11_J, V11_K
);

// class for condition codes
reg_class reg_flags(RFLAGS);

// Class for RVV v0 mask register
// https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#53-vector-masking
// The mask value used to control execution of a masked vector
// instruction is always supplied by vector register v0.
reg_class vmask_reg_v0 (
  V0
);

// Class for RVV mask registers
// We need two more vmask registers to do the vector mask logical ops,
// so define v30, v31 as mask register too.
reg_class vmask_reg (
  V0,
  V30,
  V31
);
964 %}
965
966 //----------DEFINITION BLOCK---------------------------------------------------
967 // Define name --> value mappings to inform the ADLC of an integer valued name
968 // Current support includes integer values in the range [0, 0x7FFFFFFF]
969 // Format:
970 // int_def <name> ( <int_value>, <expression>);
971 // Generated Code in ad_<arch>.hpp
972 // #define <name> (<expression>)
973 // // value == <int_value>
974 // Generated code in ad_<arch>.cpp adlc_verification()
975 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
976 //
977
978 // we follow the ppc-aix port in using a simple cost model which ranks
979 // register operations as cheap, memory ops as more expensive and
980 // branches as most expensive. the first two have a low as well as a
981 // normal cost. huge cost appears to be a way of saying don't do
982 // something
983
definitions %{
  // The default cost (of a register move instruction).
  // All other costs below are expressed as multiples of this unit so the
  // relative ranking (reg op < store < load/branch < mul < div) is explicit.
  int_def DEFAULT_COST         (   100,               100);
  int_def ALU_COST             (   100,  1 * DEFAULT_COST);  // unknown, const, arith, shift, slt,
                                                             // multi, auipc, nop, logical, move
  int_def LOAD_COST            (   300,  3 * DEFAULT_COST);  // load, fpload
  int_def STORE_COST           (   100,  1 * DEFAULT_COST);  // store, fpstore
  int_def XFER_COST            (   300,  3 * DEFAULT_COST);  // mfc, mtc, fcvt, fmove, fcmp
  int_def FMVX_COST            (   100,  1 * DEFAULT_COST);  // shuffles with no conversion
  int_def BRANCH_COST          (   200,  2 * DEFAULT_COST);  // branch, jmp, call
  int_def IMUL_COST            (  1000, 10 * DEFAULT_COST);  // imul
  int_def IDIVSI_COST          (  3400, 34 * DEFAULT_COST);  // idivsi
  int_def IDIVDI_COST          (  6600, 66 * DEFAULT_COST);  // idivdi
  int_def FMUL_SINGLE_COST     (   500,  5 * DEFAULT_COST);  // fmul, fmadd
  int_def FMUL_DOUBLE_COST     (   700,  7 * DEFAULT_COST);  // fmul, fmadd
  int_def FDIV_COST            (  2000, 20 * DEFAULT_COST);  // fdiv
  int_def FSQRT_COST           (  2500, 25 * DEFAULT_COST);  // fsqrt
  int_def VOLATILE_REF_COST    (  1000, 10 * DEFAULT_COST);
  int_def CACHE_MISS_COST      (  2000, 20 * DEFAULT_COST);  // typically cache miss penalty
%}
1004
1005
1006
1007 //----------SOURCE BLOCK-------------------------------------------------------
1008 // This is a block of C++ code which provides values, functions, and
1009 // definitions necessary in the rest of the architecture description
1010
1011 source_hpp %{
1012
1013 #include "asm/macroAssembler.hpp"
1014 #include "gc/shared/barrierSetAssembler.hpp"
1015 #include "gc/shared/cardTable.hpp"
1016 #include "gc/shared/cardTableBarrierSet.hpp"
1017 #include "gc/shared/collectedHeap.hpp"
1018 #include "opto/addnode.hpp"
1019 #include "opto/convertnode.hpp"
1020 #include "runtime/objectMonitor.hpp"
1021
1022 extern RegMask _ANY_REG32_mask;
1023 extern RegMask _ANY_REG_mask;
1024 extern RegMask _PTR_REG_mask;
1025 extern RegMask _NO_SPECIAL_REG32_mask;
1026 extern RegMask _NO_SPECIAL_REG_mask;
1027 extern RegMask _NO_SPECIAL_PTR_REG_mask;
1028 extern RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
1029
// Queried by the generic code in Compile::shorten_branches. RISC-V emits
// no call trampoline stubs, so both answers are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1047
// Platform hooks for the deopt handler stub emitted at the end of each
// nmethod. size_deopt_handler() must be an upper bound on the code that
// emit_deopt_handler() (defined in the source block below) produces.
class HandlerImpl {

 public:

  static int emit_deopt_handler(C2_MacroAssembler* masm);

  static uint size_deopt_handler() {
    // count far call + j
    return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
  }
};
1059
// Platform-dependent node flags: RISC-V defines none beyond the shared set,
// so the enum just re-exports Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};

// Returns true if opcode is a CompareAndSwapX/GetAndX form; the weak and
// exchange variants count only when maybe_volatile is true (see definition
// in the source block).
bool is_CAS(int opcode, bool maybe_volatile);

// predicate controlling translation of CompareAndSwapX
bool needs_acquiring_load_reserved(const Node *load);

// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1074 %}
1075
1076 source %{
1077
1078 // Derived RegMask with conditionally allocatable registers
1079
1080 RegMask _ANY_REG32_mask;
1081 RegMask _ANY_REG_mask;
1082 RegMask _PTR_REG_mask;
1083 RegMask _NO_SPECIAL_REG32_mask;
1084 RegMask _NO_SPECIAL_REG_mask;
1085 RegMask _NO_SPECIAL_PTR_REG_mask;
1086 RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
1087
// Initialize the derived register masks declared above. Runs once at VM
// startup, after the ADLC-generated base masks (_ALL_REG_mask etc.) exist,
// so registers whose availability depends on VM flags can be carved out.
void reg_mask_init() {

  // x0 (zr) is the hard-wired zero register and can never hold a value.
  _ANY_REG32_mask.assignFrom(_ALL_REG32_mask);
  _ANY_REG32_mask.remove(OptoReg::as_OptoReg(x0->as_VMReg()));

  _ANY_REG_mask.assignFrom(_ALL_REG_mask);
  _ANY_REG_mask.subtract(_ZR_REG_mask);

  _PTR_REG_mask.assignFrom(_ALL_REG_mask);
  _PTR_REG_mask.subtract(_ZR_REG_mask);

  // "special" (VM-reserved) registers are excluded from normal allocation.
  _NO_SPECIAL_REG32_mask.assignFrom(_ALL_REG32_mask);
  _NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask);

  _NO_SPECIAL_REG_mask.assignFrom(_ALL_REG_mask);
  _NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

  _NO_SPECIAL_PTR_REG_mask.assignFrom(_ALL_REG_mask);
  _NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);

  // x27 is not allocatable when compressed oops is on
  if (UseCompressedOops) {
    _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
    _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
    _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
  }

  // x8 is not allocatable when PreserveFramePointer is on
  if (PreserveFramePointer) {
    _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
    _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
    _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
  }

  // Variant that unconditionally excludes x8 (fp), regardless of flags.
  _NO_SPECIAL_NO_FP_PTR_REG_mask.assignFrom(_NO_SPECIAL_PTR_REG_mask);
  _NO_SPECIAL_NO_FP_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
}
1125
// No platform-specific post-matching analysis is required on RISC-V.
void PhaseOutput::pd_perform_mach_node_analysis() {
}

// Machine nodes need no alignment beyond 1 byte by default.
int MachNode::pd_alignment_required() const {
  return 1;
}

// Generic machine nodes never need padding; call nodes below override this.
int MachNode::compute_padding(int current_offset) const {
  return 0;
}
1136
1137 // is_CAS(int opcode, bool maybe_volatile)
1138 //
1139 // return true if opcode is one of the possible CompareAndSwapX
1140 // values otherwise false.
1141 bool is_CAS(int opcode, bool maybe_volatile)
1142 {
1143 switch (opcode) {
1144 // We handle these
1145 case Op_CompareAndSwapI:
1146 case Op_CompareAndSwapL:
1147 case Op_CompareAndSwapP:
1148 case Op_CompareAndSwapN:
1149 case Op_CompareAndSwapB:
1150 case Op_CompareAndSwapS:
1151 case Op_GetAndSetI:
1152 case Op_GetAndSetL:
1153 case Op_GetAndSetP:
1154 case Op_GetAndSetN:
1155 case Op_GetAndAddI:
1156 case Op_GetAndAddL:
1157 return true;
1158 case Op_CompareAndExchangeI:
1159 case Op_CompareAndExchangeN:
1160 case Op_CompareAndExchangeB:
1161 case Op_CompareAndExchangeS:
1162 case Op_CompareAndExchangeL:
1163 case Op_CompareAndExchangeP:
1164 case Op_WeakCompareAndSwapB:
1165 case Op_WeakCompareAndSwapS:
1166 case Op_WeakCompareAndSwapI:
1167 case Op_WeakCompareAndSwapL:
1168 case Op_WeakCompareAndSwapP:
1169 case Op_WeakCompareAndSwapN:
1170 return maybe_volatile;
1171 default:
1172 return false;
1173 }
1174 }
1175
1176 constexpr uint64_t MAJIK_DWORD = 0xabbaabbaabbaabbaull;
1177
1178 // predicate controlling translation of CAS
1179 //
1180 // returns true if CAS needs to use an acquiring load otherwise false
1181 bool needs_acquiring_load_reserved(const Node *n)
1182 {
1183 assert(n != nullptr && is_CAS(n->Opcode(), true), "expecting a compare and swap");
1184
1185 LoadStoreNode* ldst = n->as_LoadStore();
1186 if (n != nullptr && is_CAS(n->Opcode(), false)) {
1187 assert(ldst != nullptr && ldst->trailing_membar() != nullptr, "expected trailing membar");
1188 } else {
1189 return ldst != nullptr && ldst->trailing_membar() != nullptr;
1190 }
1191 // so we can just return true here
1192 return true;
1193 }
1194 #define __ masm->
1195
1196 // advance declarations for helper functions to convert register
1197 // indices to register objects
1198
1199 // the ad file has to provide implementations of certain methods
1200 // expected by the generic code
1201 //
1202 // REQUIRED FUNCTIONALITY
1203
1204 //=============================================================================
1205
// !!!!! Special hack to get all types of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.
// These offsets are a hard contract with the instruction sequences emitted
// by the corresponding call encodings; keep them in sync.

int MachCallStaticJavaNode::ret_addr_offset()
{
  return 3 * NativeInstruction::instruction_size; // auipc + ld + jalr
}

int MachCallDynamicJavaNode::ret_addr_offset()
{
  return NativeMovConstReg::movptr2_instruction_size + (3 * NativeInstruction::instruction_size); // movptr2, auipc + ld + jal
}

int MachCallRuntimeNode::ret_addr_offset() {
  // For address inside the code cache the call will be:
  //   auipc + jalr
  // For real runtime callouts it will be 8 instructions
  // see riscv_enc_java_to_runtime
  //   la(t0, retaddr)                                          ->  auipc + addi
  //   sd(t0, Address(xthread, JavaThread::last_Java_pc_offset()))  ->  sd
  //   movptr(t1, addr, offset, t0)                             ->  lui + lui + slli + add
  //   jalr(t1, offset)                                         ->  jalr
  if (CodeCache::contains(_entry_point)) {
    return 2 * NativeInstruction::instruction_size;
  } else {
    return 8 * NativeInstruction::instruction_size;
  }
}
1235
1236 //
1237 // Compute padding required for nodes which need alignment
1238 //
1239
1240 // With RVC a call instruction may get 2-byte aligned.
1241 // The address of the call instruction needs to be 4-byte aligned to
1242 // ensure that it does not span a cache line so that it can be patched.
1243 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
1244 {
1245 // to make sure the address of jal 4-byte aligned.
1246 return align_up(current_offset, alignment_required()) - current_offset;
1247 }
1248
1249 // With RVC a call instruction may get 2-byte aligned.
1250 // The address of the call instruction needs to be 4-byte aligned to
1251 // ensure that it does not span a cache line so that it can be patched.
1252 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
1253 {
1254 // skip the movptr2 in MacroAssembler::ic_call():
1255 // lui, lui, slli, add, addi
1256 // Though movptr2() has already 4-byte aligned with or without RVC,
1257 // We need to prevent from further changes by explicitly calculating the size.
1258 current_offset += NativeMovConstReg::movptr2_instruction_size;
1259 // to make sure the address of jal 4-byte aligned.
1260 return align_up(current_offset, alignment_required()) - current_offset;
1261 }
1262
1263 int CallRuntimeDirectNode::compute_padding(int current_offset) const
1264 {
1265 return align_up(current_offset, alignment_required()) - current_offset;
1266 }
1267
1268 int CallLeafDirectNode::compute_padding(int current_offset) const
1269 {
1270 return align_up(current_offset, alignment_required()) - current_offset;
1271 }
1272
1273 int CallLeafDirectVectorNode::compute_padding(int current_offset) const
1274 {
1275 return align_up(current_offset, alignment_required()) - current_offset;
1276 }
1277
1278 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const
1279 {
1280 return align_up(current_offset, alignment_required()) - current_offset;
1281 }
1282
1283 //=============================================================================
1284
#ifndef PRODUCT
// Debug-only textual form of the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(st != nullptr);
  st->print("BREAKPOINT");
}
#endif

// A breakpoint is a single ebreak instruction.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ ebreak();
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  // Let the shared framework measure the emitted code.
  return MachNode::size(ra_);
}
1299
1300 //=============================================================================
1301
#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
  st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif

// Emits _count nops; size() below must agree with the per-nop width chosen
// here (compressed under RVC, full-width otherwise).
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
  Assembler::CompressibleScope scope(masm); // nops shall be 2-byte under RVC for alignment purposes.
  for (int i = 0; i < _count; i++) {
    __ nop();
  }
}

uint MachNopNode::size(PhaseRegAlloc*) const {
  return _count * (UseRVC ? NativeInstruction::compressed_instruction_size : NativeInstruction::instruction_size);
}
1318
1319 //=============================================================================
//=============================================================================
// The constant table base is not materialized in a register on RISC-V
// (absolute addressing is used), so this node emits nothing.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;

int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // Never called because requires_postalloc_expand() is false.
  ShouldNotReachHere();
}

void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // Must match emit() above: no code is produced.
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  assert_cond(st != nullptr);
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1345
1346 #ifndef PRODUCT
1347 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
1348 assert_cond(st != nullptr && ra_ != nullptr);
1349 Compile* C = ra_->C;
1350
1351 int framesize = C->output()->frame_slots() << LogBytesPerInt;
1352
1353 if (C->output()->need_stack_bang(framesize)) {
1354 st->print("# stack bang size=%d\n\t", framesize);
1355 }
1356
1357 st->print("sub sp, sp, #%d\n\t", framesize);
1358 st->print("sd fp, [sp, #%d]\n\t", framesize - 2 * wordSize);
1359 st->print("sd ra, [sp, #%d]\n\t", framesize - wordSize);
1360 if (PreserveFramePointer) { st->print("add fp, sp, #%d\n\t", framesize); }
1361
1362 if (VerifyStackAtCalls) {
1363 st->print("mv t2, %ld\n\t", MAJIK_DWORD);
1364 st->print("sd t2, [sp, #%d]\n\t", framesize - 3 * wordSize);
1365 }
1366
1367 if (C->stub_function() == nullptr) {
1368 st->print("ld t0, [guard]\n\t");
1369 st->print("membar LoadLoad\n\t");
1370 st->print("ld t1, [xthread, #thread_disarmed_guard_value_offset]\n\t");
1371 st->print("beq t0, t1, skip\n\t");
1372 st->print("jalr #nmethod_entry_barrier_stub\n\t");
1373 st->print("j skip\n\t");
1374 st->print("guard: int\n\t");
1375 st->print("skip:\n\t");
1376 }
1377 }
1378 #endif
1379
// Emit the method prolog: optional clinit barrier, stack bang, frame build,
// optional stack-verification sentinel, and the nmethod entry barrier.
// The ordering here is a contract with deoptimization and stack walking.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  assert_cond(ra_ != nullptr);
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and fp
  const int framesize = C->output()->frame_size_in_bytes();

  assert_cond(C != nullptr);

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Jump to the wrong-method stub unless the holder class is initialized.
    __ mov_metadata(t1, C->method()->holder()->constant_encoding());
    __ clinit_barrier(t1, t0, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  __ build_frame(framesize);

  if (VerifyStackAtCalls) {
    // Store the sentinel below the saved fp/ra pair; checked on return.
    __ mv(t2, MAJIK_DWORD);
    __ sd(t2, Address(sp, framesize - 3 * wordSize));
  }

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
1442
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  assert_cond(ra_ != nullptr);
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// The prolog itself carries no relocatable constants.
int MachPrologNode::reloc() const
{
  return 0;
}
1454
1455 //=============================================================================
1456
#ifndef PRODUCT
// Debug-only textual rendering of the epilog; mirrors remove_frame() plus
// the optional safepoint poll emitted by MachEpilogNode::emit() below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(st != nullptr && ra_ != nullptr);
  Compile* C = ra_->C;
  assert_cond(C != nullptr);
  int framesize = C->output()->frame_size_in_bytes();

  st->print("# pop frame %d\n\t", framesize);

  if (framesize == 0) {
    st->print("ld ra, [sp,#%d]\n\t", (2 * wordSize));
    st->print("ld fp, [sp,#%d]\n\t", (3 * wordSize));
    st->print("add sp, sp, #%d\n\t", (2 * wordSize));
  } else {
    st->print("add sp, sp, #%d\n\t", framesize);
    st->print("ld ra, [sp,#%d]\n\t", - 2 * wordSize);
    st->print("ld fp, [sp,#%d]\n\t", - wordSize);
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ld t0, [xthread,#%d]\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("bgtu sp, t0, #slow_path");
  }
}
#endif
1483
// Emit the method epilog: tear down the frame, optionally check the
// reserved stack zone, then perform the return safepoint poll.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  assert_cond(ra_ != nullptr);
  Compile* C = ra_->C;
  assert_cond(C != nullptr);
  int framesize = C->output()->frame_size_in_bytes();

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      // Use the real slow-path stub only when actually emitting code,
      // not while measuring its size.
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
  }
}
1508
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  assert_cond(ra_ != nullptr);
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

// Use the default pipeline class for scheduling.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1522
1523 //=============================================================================
1524
1525 // Figure out which register class each belongs in: rc_int, rc_float or
1526 // rc_stack.
1527 enum RC { rc_bad, rc_int, rc_float, rc_vector, rc_stack };
1528
1529 static enum RC rc_class(OptoReg::Name reg) {
1530
1531 if (reg == OptoReg::Bad) {
1532 return rc_bad;
1533 }
1534
1535 // we have 30 int registers * 2 halves
1536 // (t0 and t1 are omitted)
1537 int slots_of_int_registers = Register::max_slots_per_register * (Register::number_of_registers - 2);
1538 if (reg < slots_of_int_registers) {
1539 return rc_int;
1540 }
1541
1542 // we have 32 float register * 2 halves
1543 int slots_of_float_registers = FloatRegister::max_slots_per_register * FloatRegister::number_of_registers;
1544 if (reg < slots_of_int_registers + slots_of_float_registers) {
1545 return rc_float;
1546 }
1547
1548 // we have 32 vector register * 4 halves
1549 int slots_of_vector_registers = VectorRegister::max_slots_per_register * VectorRegister::number_of_registers;
1550 if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) {
1551 return rc_vector;
1552 }
1553
1554 // Between vector regs & stack is the flags regs.
1555 assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1556
1557 return rc_stack;
1558 }
1559
// Shared worker for MachSpillCopyNode format/emit: moves a value between
// any combination of gpr, fpr, vector register and stack slot. When masm
// is null it only prints (format); when st is null it only emits code.
// Always returns 0 (callers size the node via MachNode::size()).
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  assert_cond(ra_ != nullptr);
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  // 64-bit (two-slot) values must occupy aligned, adjacent slot pairs
  // (vector masks are exempt from the pairing rule).
  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo & 1) == 0 && src_lo + 1 == src_hi &&
           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  // --- Vector and vector-mask copies -------------------------------------
  if (bottom_type()->isa_vect() != nullptr) {
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack to stack
        __ spill_copy_vector_stack_to_stack(src_offset, dst_offset,
                                            vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
        // vpr to stack
        __ spill(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
        // stack to vpr
        __ unspill(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
        // vpr to vpr
        __ vsetvli_helper(T_BYTE, MaxVectorSize);
        __ vmv_v_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (bottom_type()->isa_vectmask() && masm) {
      // Mask registers hold one bit per vector element: slots * 32 bits / 8.
      int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8;
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack to stack
        __ spill_copy_vmask_stack_to_stack(src_offset, dst_offset,
                                           vmask_size_in_bytes);
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
        // vmask to stack
        __ spill_vmask(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
        // stack to vmask
        __ unspill_vmask(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
        // vmask to vmask
        __ vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
        __ vmv_v_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm != nullptr) {
    // --- Scalar copies, dispatched on the source class -------------------
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (!is64 && this->ideal_reg() != Op_RegI) { // zero extended for narrow oop or klass
          __ zext(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]), 32);
        } else {
          __ mv(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
          __ fmv_d_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
          __ fmv_w_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
          __ fmv_x_d(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
          __ fmv_x_w(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
          __ fmv_d(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
          __ fmv_s(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        if (this->ideal_reg() == Op_RegI) {
          __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
        } else { // // zero extended for narrow oop or klass
          __ unspillu(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
        }
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64, src_offset);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        // Bounce through t0 since there is no memory-to-memory move.
        if (this->ideal_reg() == Op_RegI) {
          __ unspill(t0, is64, src_offset);
        } else { // zero extended for narrow oop or klass
          __ unspillu(t0, is64, src_offset);
        }
        __ spill(t0, is64, dst_offset);
      }
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // --- Optional textual rendering for PhaseOutput listings ---------------
  if (st != nullptr) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", src_offset);
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", dst_offset);
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      if (ideal_reg() == Op_VecA) {
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
      } else {
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# vmask spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;
}
1734
#ifndef PRODUCT
// format/emit both delegate to implementation(); passing a null masm/st
// selects print-only vs. emit-only behavior.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (ra_ == nullptr) {
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  } else {
    implementation(nullptr, ra_, false, st);
  }
}
#endif

void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // Variable size; measured by the shared framework.
  return MachNode::size(ra_);
}
1752
1753 //=============================================================================
1754
#ifndef PRODUCT
// Debug-only rendering: a box lock materializes the address of its
// stack slot (sp + offset) into the allocated register.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(ra_ != nullptr && st != nullptr);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("add %s, sp, #%d\t# box lock",
            Matcher::regName[reg], offset);
}
#endif
1764
// Emit sp + offset into the allocated register. The instruction count per
// path must match BoxLockNode::size() below, hence the incompressible scope.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Assembler::IncompressibleScope scope(masm); // Fixed length: see BoxLockNode::size()

  assert_cond(ra_ != nullptr);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm12(offset)) {
    // Offset fits a 12-bit immediate: single addi.
    __ addi(as_Register(reg), sp, offset);
  } else {
    // Large offset: materialize it in t0 (li32 = lui + addiw), then add.
    __ li32(t0, offset);
    __ add(as_Register(reg), sp, t0);
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // Must agree exactly with the instruction counts emitted above.
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::is_simm12(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 3 * NativeInstruction::instruction_size; // lui + addiw + add;
  }
}
1790
1791 //=============================================================================
1792
1793 #ifndef PRODUCT
1794 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1795 {
1796 assert_cond(st != nullptr);
1797 st->print_cr("# MachUEPNode");
1798 if (UseCompressedClassPointers) {
1799 st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1800 st->print_cr("\tlwu t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
1801 } else {
1802 st->print_cr("\tld t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1803 st->print_cr("\tld t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
1804 }
1805 st->print_cr("\tbeq t1, t2, ic_hit");
1806 st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
1807 st->print_cr("\tic_hit:");
1808 }
1809 #endif
1810
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point: perform the inline cache check and
  // fall through (aligned) into the verified entry point on a hit.
  __ ic_check(CodeEntryAlignment);

  // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
  assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
}
1819
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Size is computed generically from the emitted code.
  assert_cond(ra_ != nullptr);
  return MachNode::size(ra_);
}
1825
1826 // REQUIRED EMIT CODE
1827
1828 //=============================================================================
1829
// Emit deopt handler code.
// Returns the offset of the handler's entry point within the stub, or 0 on
// failure (code cache full).
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }
  int offset = __ offset();

  Label start;
  __ bind(start);

  // Far call into the deopt blob's unpack entry.
  __ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  // The entry point is placed *after* the far call; the trailing jump loops
  // back so the call still executes when entering here. This layout keeps
  // enough bytes after the entry for the post-call NOP check (asserted below).
  int entry_offset = __ offset();
  __ j(start);

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  assert(__ offset() - entry_offset >= NativePostCallNop::first_check_size,
         "out of bounds read in post-call NOP check");
  __ end_a_stub();
  return entry_offset;

}
1855 // REQUIRED MATCHER CODE
1856
1857 //=============================================================================
1858
// Identify extra cases that we might want to provide match rules for vector
// nodes and other intrinsics guarded with vector length (vlen) and element
// type (bt). Returns whether this opcode is supported on the current CPU,
// given its detected extensions and the relevant JVM flags.
bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode)) {
    return false;
  }

  switch (opcode) {
    case Op_OnSpinWait:
      return VM_Version::supports_on_spin_wait();
    case Op_CacheWB: // fall through
    case Op_CacheWBPreSync: // fall through
    case Op_CacheWBPostSync:
      if (!VM_Version::supports_data_cache_line_flush()) {
        return false;
      }
      break;

    // String/array intrinsics implemented with RVV instructions.
    case Op_StrCompressedCopy: // fall through
    case Op_StrInflatedCopy: // fall through
    case Op_CountPositives: // fall through
    case Op_EncodeISOArray:
      return UseRVV;

    case Op_PopCountI:
    case Op_PopCountL:
      return UsePopCountInstruction;

    // Bit-manipulation ops backed by the Zbkb extension.
    case Op_ReverseI:
    case Op_ReverseL:
      return UseZbkb;

    // Bit-manipulation ops backed by the Zbb extension.
    case Op_ReverseBytesI:
    case Op_ReverseBytesL:
    case Op_ReverseBytesS:
    case Op_ReverseBytesUS:
    case Op_RotateRight:
    case Op_RotateLeft:
    case Op_CountLeadingZerosI:
    case Op_CountLeadingZerosL:
    case Op_CountTrailingZerosI:
    case Op_CountTrailingZerosL:
      return UseZbb;

    case Op_FmaF:
    case Op_FmaD:
      return UseFMA;

    // Half-precision float support (Zfh/Zfhmin extensions).
    case Op_ConvHF2F:
    case Op_ConvF2HF:
      return VM_Version::supports_float16_float_conversion();
    case Op_ReinterpretS2HF:
    case Op_ReinterpretHF2S:
      return UseZfh || UseZfhmin;
    case Op_AddHF:
    case Op_DivHF:
    case Op_FmaHF:
    case Op_MaxHF:
    case Op_MinHF:
    case Op_MulHF:
    case Op_SqrtHF:
    case Op_SubHF:
      return UseZfh;

    // No conditional-move match rules for pointers on this port.
    case Op_CMoveP:
    case Op_CMoveN:
      return false;
  }

  return true; // Per default match rules are supported.
}
1928
// Register mask for vector predicate (mask) registers.
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_VMASK_REG_mask;
}
1932
// Vector calling convention not yet implemented.
// NOTE(review): despite the note above, this returns EnableVectorSupport
// rather than a constant false — vector values can be returned in V8
// (see vector_return_value below).
bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport;
}
1937
// Return-value register pair for a vector value: v8, per the RISC-V
// vector calling convention.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  assert(EnableVectorSupport, "sanity");
  assert(ideal_reg == Op_VecA, "sanity");
  // check more info at https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc
  int lo = V8_num;
  int hi = V8_K_num;
  return OptoRegPair(hi, lo);
}
1946
1947 // Is this branch offset short enough that a short branch can be used?
1948 //
1949 // NOTE: If the platform does not provide any short branch variants, then
1950 // this method should return false for offset 0.
1951 // |---label(L1)-----|
1952 // |-----------------|
1953 // |-----------------|----------eq: float-------------------
1954 // |-----------------| // far_cmpD_branch | cmpD_branch
1955 // |------- ---------| feq; | feq;
1956 // |-far_cmpD_branch-| beqz done; | bnez L;
1957 // |-----------------| j L; |
1958 // |-----------------| bind(done); |
1959 // |-----------------|--------------------------------------
1960 // |-----------------| // so shortBrSize = br_size - 4;
1961 // |-----------------| // so offs = offset - shortBrSize + 4;
1962 // |---label(L2)-----|
1963 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1964 // The passed offset is relative to address of the branch.
1965 int shortBrSize = br_size - 4;
1966 int offs = offset - shortBrSize + 4;
1967 return (-4096 <= offs && offs < 4096);
1968 }
1969
1970 // Vector width in bytes.
1971 int Matcher::vector_width_in_bytes(BasicType bt) {
1972 if (UseRVV) {
1973 // The MaxVectorSize should have been set by detecting RVV max vector register size when check UseRVV.
1974 // MaxVectorSize == VM_Version::_initial_vector_length
1975 int size = MaxVectorSize;
1976 // Minimum 2 values in vector
1977 if (size < 2 * type2aelembytes(bt)) size = 0;
1978 // But never < 4
1979 if (size < 4) size = 0;
1980 return size;
1981 }
1982 return 0;
1983 }
1984
// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt) / type2aelembytes(bt);
}
1989
1990 int Matcher::min_vector_size(const BasicType bt) {
1991 int size;
1992 switch(bt) {
1993 case T_BOOLEAN:
1994 // Load/store a vector mask with only 2 elements for vector types
1995 // such as "2I/2F/2L/2D".
1996 size = 2;
1997 break;
1998 case T_BYTE:
1999 // Generate a "4B" vector, to support vector cast between "8B/16B"
2000 // and "4S/4I/4L/4F/4D".
2001 size = 4;
2002 break;
2003 case T_SHORT:
2004 // Generate a "2S" vector, to support vector cast between "4S/8S"
2005 // and "2I/2L/2F/2D".
2006 size = 2;
2007 break;
2008 default:
2009 // Limit the min vector length to 64-bit.
2010 size = 8 / type2aelembytes(bt);
2011 // The number of elements in a vector should be at least 2.
2012 size = MAX2(size, 2);
2013 }
2014
2015 int max_size = max_vector_size(bt);
2016 return MIN2(size, max_size);
2017 }
2018
// Auto-vectorization uses the same upper limit as explicit vector API code.
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
2022
// Vector ideal reg.
// RVV vectors are scalable, so all vector lengths map to Op_VecA.
uint Matcher::vector_ideal_reg(int len) {
  assert(MaxVectorSize >= len, "");
  if (UseRVV) {
    return Op_VecA;
  }

  // Vectors are only matched when UseRVV is on (see vector_width_in_bytes).
  ShouldNotReachHere();
  return 0;
}
2033
// Element count held by a scalable vector register for this element type.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
2037
// This port does not use generic vector operands, so specialization
// must never be requested.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return nullptr;
}
2042
// Only meaningful with generic vector operands, which this port doesn't use.
bool Matcher::is_reg2reg_move(MachNode* m) {
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}
2047
// Register-allocation biasing is not used on this port.
bool Matcher::is_register_biasing_candidate(const MachNode* mdef, int oper_index) {
  return false;
}
2051
// Only meaningful with generic vector operands, which this port doesn't use.
bool Matcher::is_generic_vector(MachOper* opnd) {
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}
2056
2057 #ifdef ASSERT
// Return whether or not this register is ever used as an argument.
// Covers the Java argument registers: x10-x17 (a0-a7) and f10-f17 (fa0-fa7),
// including their high halves.
bool Matcher::can_be_java_arg(int reg)
{
  return
    reg == R10_num || reg == R10_H_num ||
    reg == R11_num || reg == R11_H_num ||
    reg == R12_num || reg == R12_H_num ||
    reg == R13_num || reg == R13_H_num ||
    reg == R14_num || reg == R14_H_num ||
    reg == R15_num || reg == R15_H_num ||
    reg == R16_num || reg == R16_H_num ||
    reg == R17_num || reg == R17_H_num ||
    reg == F10_num || reg == F10_H_num ||
    reg == F11_num || reg == F11_H_num ||
    reg == F12_num || reg == F12_H_num ||
    reg == F13_num || reg == F13_H_num ||
    reg == F14_num || reg == F14_H_num ||
    reg == F15_num || reg == F15_H_num ||
    reg == F16_num || reg == F16_H_num ||
    reg == F17_num || reg == F17_H_num;
}
2079 #endif
2080
// Integer register pressure threshold used by the register allocator;
// overridable with -XX:INTPRESSURE.
uint Matcher::int_pressure_limit()
{
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}
2101
// Float register pressure threshold; overridable with -XX:FLOATPRESSURE.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
}
2107
// No hand-written assembler sequence for long division by constant on riscv.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
2111
// Register for DIVI projection of divmodI.
// divmodI/divmodL are not matched on riscv, so none of these masks apply.
const RegMask& Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}

// Register for MODI projection of divmodI.
const RegMask& Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}

// Register for DIVL projection of divmodL.
const RegMask& Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}

// Register for MODL projection of divmodL.
const RegMask& Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask::EMPTY;
}
2134
2135 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2136 assert_cond(addp != nullptr);
2137 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2138 Node* u = addp->fast_out(i);
2139 if (u != nullptr && u->is_Mem()) {
2140 int opsize = u->as_Mem()->memory_size();
2141 assert(opsize > 0, "unexpected memory operand size");
2142 if (u->as_Mem()->memory_size() != (1 << shift)) {
2143 return false;
2144 }
2145 }
2146 }
2147 return true;
2148 }
2149
// Binary src (Replicate scalar/immediate)
// Matches a vector binary op whose second input is a Replicate node, i.e.
// a vector-scalar arithmetic/bitwise pattern that the matcher can fold.
static bool is_vector_scalar_bitwise_pattern(Node* n, Node* m) {
  if (n == nullptr || m == nullptr) {
    return false;
  }

  if (m->Opcode() != Op_Replicate) {
    return false;
  }

  switch (n->Opcode()) {
    case Op_AndV:
    case Op_OrV:
    case Op_XorV:
    case Op_AddVB:
    case Op_AddVS:
    case Op_AddVI:
    case Op_AddVL:
    case Op_SubVB:
    case Op_SubVS:
    case Op_SubVI:
    case Op_SubVL:
    case Op_MulVB:
    case Op_MulVS:
    case Op_MulVI:
    case Op_MulVL: {
      return true;
    }
    default:
      return false;
  }
}
2182
2183 // (XorV src (Replicate m1))
2184 // (XorVMask src (MaskAll m1))
2185 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
2186 if (n != nullptr && m != nullptr) {
2187 return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
2188 VectorNode::is_all_ones_vector(m);
2189 }
2190 return false;
2191 }
2192
// Should the Matcher clone input 'm' of node 'n'?
// Cloning lets a shared input be matched into each of its users so the
// patterns below can be folded into a single machine instruction.
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  assert_cond(m != nullptr);
  if (is_vshift_con_pattern(n, m) || // ShiftV src (ShiftCntV con)
      is_vector_bitwise_not_pattern(n, m) ||
      is_vector_scalar_bitwise_pattern(n, m) ||
      is_encode_and_store_pattern(n, m)) {
    mstack.push(m, Visit);
    return true;
  }
  return false;
}
2205
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
// riscv only folds simple base+offset addressing, handled generically.
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  return clone_base_plus_offset_address(m, mstack, address_visited);
}
2212
2213 %}
2214
2215
2216
2217 //----------ENCODING BLOCK-----------------------------------------------------
2218 // This block specifies the encoding classes used by the compiler to
2219 // output byte streams. Encoding classes are parameterized macros
2220 // used by Machine Instruction Nodes in order to generate the bit
2221 // encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword. There are currently four
// supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, &
2224 // COND_INTER. REG_INTER causes an operand to generate a function
2225 // which returns its register number when queried. CONST_INTER causes
2226 // an operand to generate a function which returns the value of the
2227 // constant when queried. MEMORY_INTER causes an operand to generate
2228 // four functions which return the Base Register, the Index Register,
2229 // the Scale Value, and the Offset Value of the operand when queried.
2230 // COND_INTER causes an operand to generate six functions which return
2231 // the encoding code (ie - encoding bits for the instruction)
2232 // associated with each basic boolean condition for a conditional
2233 // instruction.
2234 //
2235 // Instructions specify two basic values for encoding. Again, a
2236 // function is available to check if the constant displacement is an
2237 // oop. They use the ins_encode keyword to specify their encoding
2238 // classes (which must be a sequence of enc_class names, and their
2239 // parameters, specified in the encoding block), and they use the
2240 // opcode keyword to specify, in order, their primary, secondary, and
2241 // tertiary opcode. Only the opcode sections which a particular
2242 // instruction needs for encoding need to be specified.
2243 encode %{
2244 // BEGIN Non-volatile memory access
2245
  enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
    // Materialize a 32/64-bit integer constant into dst.
    int64_t con = (int64_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    __ mv(dst_reg, con);
  %}

  enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
    // Materialize a pointer constant into dst, with relocation info when
    // the constant is an oop or metadata pointer.
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      // nullptr and the tagged value 1 use dedicated encodings
      // (riscv_enc_mov_zero / riscv_enc_mov_p1).
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none || rtype == relocInfo::external_word_type, "unexpected reloc type");
        __ mv(dst_reg, $src$$constant);
      }
    }
  %}

  enc_class riscv_enc_mov_p1(iRegP dst) %{
    // Materialize the pointer constant 1 (initial object header value).
    Register dst_reg = as_Register($dst$$reg);
    __ mv(dst_reg, 1);
  %}

  enc_class riscv_enc_mov_n(iRegN dst, immN src) %{
    // Materialize a narrow (compressed) oop constant.
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  enc_class riscv_enc_mov_zero(iRegNorP dst) %{
    // Zero a (narrow or regular) pointer register.
    Register dst_reg = as_Register($dst$$reg);
    __ mv(dst_reg, zr);
  %}

  enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{
    // Materialize a narrow (compressed) klass constant.
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
2303
  // compare and branch instruction encodings

  enc_class riscv_enc_j(label lbl) %{
    // Unconditional jump to lbl.
    Label* L = $lbl$$label;
    __ j(*L);
  %}

  enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
    // Unsigned compare against zero: "uge 0" is always true, so emit an
    // unconditional jump; "ult 0" is always false, so emit nothing.
    Label* L = $lbl$$label;
    switch ($cmp$$cmpcode) {
      case(BoolTest::ge):
        __ j(*L);
        break;
      case(BoolTest::lt):
        break;
      default:
        Unimplemented();
    }
  %}
2323
  // call instruction encodings

  enc_class riscv_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result) %{
    // Slow-path subtype check. With $primary set, result is cleared on a
    // hit; otherwise t1 is set to 0 on a hit and 1 on a miss.
    Register sub_reg = as_Register($sub$$reg);
    Register super_reg = as_Register($super$$reg);
    Register temp_reg = as_Register($temp$$reg);
    Register result_reg = as_Register($result$$reg);
    Register cr_reg = t1;

    Label miss;
    Label done;
    __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss, /*set_cond_codes*/ true);
    if ($primary) {
      __ mv(result_reg, zr);
    } else {
      __ mv(cr_reg, zr);
      __ j(done);
    }

    __ bind(miss);
    if (!$primary) {
      __ mv(cr_reg, 1);
    }

    __ bind(done);
  %}
2351
  enc_class riscv_enc_java_static_call(method meth) %{
    // Static/opt-virtual Java call, or a call to a runtime wrapper.
    // Emitted at fixed length so return-address offsets stay constant.
    Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset

    address addr = (address)$meth$$method;
    address call = nullptr;
    assert_cond(addr != nullptr);
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ reloc_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ nop();
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(masm);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ reloc_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }

      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - (__ begin()));
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();
  %}
2398
  enc_class riscv_enc_java_dynamic_call(method meth) %{
    // Virtual Java call through the inline cache.
    Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    __ post_call_nop();
  %}
2410
  enc_class riscv_enc_call_epilog() %{
    // After-call verification: with -XX:+VerifyStackAtCalls, confirm the
    // stack depth is unchanged by checking the magic cookie is still there.
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3 * VMRegImpl::slots_per_word));
      Label stack_ok;
      __ ld(t1, Address(sp, framesize));
      __ mv(t2, MAJIK_DWORD);
      __ beq(t2, t1, stack_ok);
      __ stop("MAJIK_DWORD not found");
      __ bind(stack_ok);
    }
  %}
2423
  enc_class riscv_enc_java_to_runtime(method meth) %{
    Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset

    // Some calls to generated routines (arraycopy code) are scheduled by C2
    // as runtime calls. if so we can call them using a far call (they will be
    // in the code cache, thus in a reachable segment) otherwise we have to use
    // a movptr+jalr pair which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    if (CodeCache::contains(entry)) {
      __ far_call(Address(entry, relocInfo::runtime_call_type));
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable by recording the return pc.
      __ la(t0, retaddr);
      __ sd(t0, Address(xthread, JavaThread::last_Java_pc_offset()));
      int32_t offset = 0;
      // No relocation needed
      __ movptr(t1, entry, offset, t0); // lui + lui + slli + add
      __ jalr(t1, offset);
      __ bind(retaddr);
      __ post_call_nop();
    }
  %}
2448
  enc_class riscv_enc_tail_call(iRegP jump_target) %{
    // Tail call: jump to a computed target without linking a return address.
    Register target_reg = as_Register($jump_target$$reg);
    __ jr(target_reg);
  %}

  enc_class riscv_enc_tail_jmp(iRegP jump_target) %{
    // Tail jump used on the exception path.
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in x10
    // ret addr has been popped into ra
    // callee expects it in x13
    __ mv(x13, ra);
    __ jr(target_reg);
  %}

  enc_class riscv_enc_rethrow() %{
    // Far jump to the shared rethrow stub.
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  enc_class riscv_enc_ret() %{
    // Return to caller.
    __ ret();
  %}
2470
2471 %}
2472
2473 //----------FRAME--------------------------------------------------------------
2474 // Definition of frame structure and management information.
2475 //
2476 // S T A C K L A Y O U T Allocators stack-slot number
2477 // | (to get allocators register number
2478 // G Owned by | | v add OptoReg::stack0())
2479 // r CALLER | |
2480 // o | +--------+ pad to even-align allocators stack-slot
2481 // w V | pad0 | numbers; owned by CALLER
2482 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
2483 // h ^ | in | 5
2484 // | | args | 4 Holes in incoming args owned by SELF
2485 // | | | | 3
2486 // | | +--------+
2487 // V | | old out| Empty on Intel, window on Sparc
2488 // | old |preserve| Must be even aligned.
2489 // | SP-+--------+----> Matcher::_old_SP, even aligned
2490 // | | in | 3 area for Intel ret address
2491 // Owned by |preserve| Empty on Sparc.
2492 // SELF +--------+
2493 // | | pad2 | 2 pad to align old SP
2494 // | +--------+ 1
2495 // | | locks | 0
2496 // | +--------+----> OptoReg::stack0(), even aligned
2497 // | | pad1 | 11 pad to align new SP
2498 // | +--------+
2499 // | | | 10
2500 // | | spills | 9 spills
2501 // V | | 8 (pad0 slot for callee)
2502 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
2503 // ^ | out | 7
2504 // | | args | 6 Holes in outgoing args owned by CALLEE
2505 // Owned by +--------+
2506 // CALLEE | new out| 6 Empty on Intel, window on Sparc
2507 // | new |preserve| Must be even-aligned.
2508 // | SP-+--------+----> Matcher::_new_SP, even aligned
2509 // | | |
2510 //
2511 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
2512 // known from SELF's arguments and the Java calling convention.
2513 // Region 6-7 is determined per call site.
2514 // Note 2: If the calling convention leaves holes in the incoming argument
2515 // area, those holes are owned by SELF. Holes in the outgoing area
2516 // are owned by the CALLEE. Holes should not be necessary in the
2517 // incoming area, as the Java calling convention is completely under
2518 // the control of the AD file. Doubles can be sorted and packed to
2519 // avoid holes. Holes in the outgoing arguments may be necessary for
2520 // varargs C calling conventions.
2521 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
2522 // even aligned with pad0 as needed.
2523 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
2524 // (the latter is true on Intel but is it false on RISCV?)
2525 // region 6-11 is even aligned; it may be padded out more so that
2526 // the region from SP to FP meets the minimum stack alignment.
2527 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
2528 // alignment. Region 11, pad1, may be dynamically extended so that
2529 // SP meets the minimum alignment.
2530
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R31);

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
  // VMRegImpl::slots_per_word = wordSize / stack_slot_size = 8 / 4 = 2
  sync_stack_slots(1 * VMRegImpl::slots_per_word);

  // Compiled code's Frame Pointer
  frame_pointer(R2);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes / BytesPerInt);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values. Same as C for now.
  // Maps each ideal register class to its ABI return register pair
  // (x10/a0 for integers and pointers, f10/fa0 for floats).
  return_value
  %{
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    static const int lo[Op_RegL + 1] = { // enum name
      0,                 // Op_Node
      0,                 // Op_Set
      R10_num,           // Op_RegN
      R10_num,           // Op_RegI
      R10_num,           // Op_RegP
      F10_num,           // Op_RegF
      F10_num,           // Op_RegD
      R10_num            // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,                 // Op_Node
      0,                 // Op_Set
      OptoReg::Bad,      // Op_RegN
      OptoReg::Bad,      // Op_RegI
      R10_H_num,         // Op_RegP
      OptoReg::Bad,      // Op_RegF
      F10_H_num,         // Op_RegD
      R10_H_num          // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
2601
2602 //----------ATTRIBUTES---------------------------------------------------------
2603 //----------Operand Attributes-------------------------------------------------
2604 op_attrib op_cost(1); // Required cost attribute
2605
2606 //----------Instruction Attributes---------------------------------------------
2607 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
2608 ins_attrib ins_size(32); // Required size attribute (in bits)
2609 ins_attrib ins_short_branch(0); // Required flag: is this instruction
2610 // a non-matching short branch variant
2611 // of some long branch?
2612 ins_attrib ins_alignment(4); // Required alignment attribute (must
2613 // be a power of 2) specifies the
2614 // alignment that some part of the
2615 // instruction (not necessarily the
2616 // start) requires. If > 1, a
2617 // compute_padding() function must be
2618 // provided for the instruction
2619
2620 // Whether this node is expanded during code emission into a sequence of
2621 // instructions and the first instruction can perform an implicit null check.
2622 ins_attrib ins_is_late_expanded_null_check_candidate(false);
2623
2624 //----------OPERANDS-----------------------------------------------------------
2625 // Operand definitions must precede instruction definitions for correct parsing
2626 // in the ADLC because operands constitute user defined types which are used in
2627 // instruction definitions.
2628
2629 //----------Simple Operands----------------------------------------------------
2630
// Integer operands 32 bit
// 32 bit immediate: matches any 32-bit integer constant
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2674
// Unsigned Integer Immediate: 6-bit int in [32, 64)
operand uimmI6_ge32() %{
  predicate(((unsigned int)(n->get_int()) < 64) && (n->get_int() >= 32));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer, at most 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 31
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 63
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2731
// 32 bit integer valid for add immediate (fits in a 12-bit signed immediate)
operand immIAdd()
%{
  predicate(Assembler::is_simm12((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for sub immediate (its negation fits in simm12,
// so subtraction can be emitted as addi with -value)
operand immISub()
%{
  predicate(Assembler::is_simm12(-(int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed value.
operand immI5()
%{
  predicate(n->get_int() <= 15 && n->get_int() >= -16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed value (simm5)
operand immL5()
%{
  predicate(n->get_long() <= 15 && n->get_long() >= -16);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2773
// Integer operands 64 bit
// 64 bit immediate (unconstrained; any long constant)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer operands
// Pointer Immediate (unconstrained; any pointer constant)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Null Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2829
// Int Immediate: low 16-bit mask
operand immI_16bits()
%{
  predicate(n->get_int() == 0xFFFF);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int Immediate: exact power of two (tested on the unsigned value)
operand immIpowerOf2() %{
  predicate(is_power_of_2((juint)(n->get_int())));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement (the value -1)
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}


// 64 bit integer valid for add immediate
// (fits in the signed 12-bit immediate field of ADDI)
operand immLAdd()
%{
  predicate(Assembler::is_simm12(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for sub immediate
// (its negation fits in a signed 12-bit field, so SUB can be encoded as ADDI)
operand immLSub()
%{
  predicate(Assembler::is_simm12(-(n->get_long())));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2889
// Narrow pointer operands
// Narrow Pointer Immediate (unconstrained compressed oop constant)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Null Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2920
// Float and Double operands
// Double Immediate (unconstrained)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit-pattern comparison, so -0.0d does NOT match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate (unconstrained)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit-pattern comparison, so -0.0f does NOT match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float Immediate (unconstrained)
operand immH()
%{
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float Immediate: +0.0f.
operand immH0()
%{
  predicate(jint_cast(n->geth()) == 0);
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2982
// Int Immediate valid as a signed 12-bit memory offset
operand immIOffset()
%{
  predicate(Assembler::is_simm12(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate valid as a signed 12-bit memory offset
operand immLOffset()
%{
  predicate(Assembler::is_simm12(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Scale values (element-size shift amounts 1..3)
operand immIScale()
%{
  predicate(1 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3011
// Integer 32 bit Register Operands
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R10 only
operand iRegI_R10()
%{
  constraint(ALLOC_IN_RC(int_r10_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R12 only
operand iRegI_R12()
%{
  constraint(ALLOC_IN_RC(int_r12_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R13 only
operand iRegI_R13()
%{
  constraint(ALLOC_IN_RC(int_r13_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R14 only
operand iRegI_R14()
%{
  constraint(ALLOC_IN_RC(int_r14_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3076
// Integer 64 bit Register Operands
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike the sibling operands this one declares no op_cost(0);
// confirm the omission is intentional before relying on costed matching.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R10);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R29 only
operand iRegL_R29()
%{
  constraint(ALLOC_IN_RC(r29_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R30 only
operand iRegL_R30()
%{
  constraint(ALLOC_IN_RC(r30_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3119
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R10);
  match(iRegP_R15);
  match(javaThread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use fp even if
// fp is not used to hold the frame pointer.
operand iRegPNoSpNoFp()
%{
  constraint(ALLOC_IN_RC(no_special_no_fp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R11 only
operand iRegP_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R12 only
operand iRegP_R12()
%{
  constraint(ALLOC_IN_RC(r12_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R13 only
operand iRegP_R13()
%{
  constraint(ALLOC_IN_RC(r13_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R14 only
operand iRegP_R14()
%{
  constraint(ALLOC_IN_RC(r14_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R15 only
operand iRegP_R15()
%{
  constraint(ALLOC_IN_RC(r15_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R16 only
operand iRegP_R16()
%{
  constraint(ALLOC_IN_RC(r16_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R28 only
operand iRegP_R28()
%{
  constraint(ALLOC_IN_RC(r28_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R30 only
operand iRegP_R30()
%{
  constraint(ALLOC_IN_RC(r30_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R31 only
operand iRegP_R31()
%{
  constraint(ALLOC_IN_RC(r31_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3265
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer (32 bit) Register not Special
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R10 only
operand iRegL_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3298
// Float Register
// Float register operands
operand fRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand fRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands.
operand vReg()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3333
// Fixed vector register operands v1..v11, for rules that require a
// specific architectural vector register.
operand vReg_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3443
// Vector mask register operand
operand vRegMask()
%{
  constraint(ALLOC_IN_RC(vmask_reg));
  match(RegVectMask);
  match(vRegMask_V0);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// The mask value used to control execution of a masked
// vector instruction is always supplied by vector register v0.
operand vRegMask_V0()
%{
  constraint(ALLOC_IN_RC(vmask_reg_v0));
  match(RegVectMask);
  match(vRegMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Java Thread Register
operand javaThread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(java_thread_reg)); // java_thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3475
//----------Memory Operands----------------------------------------------------
// RISCV has only base_plus_offset and literal address mode, so no need to use
// index and scale. Here set index as 0xffffffff and scale as 0x0.

// [base] with zero displacement
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + simm12 int offset]
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base + simm12 long offset]
operand indOffL(iRegP reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [narrow base], only legal when compressed oops need no shift to decode
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base + simm12 int offset]
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [narrow base + simm12 long offset]
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
3565
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);  // RSP
    index(0xffffffff);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);  // RSP
    index(0xffffffff);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);  // RSP
    index(0xffffffff);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);  // RSP
    index(0xffffffff);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
3625
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  // match the ConvL2I directly so the truncation costs nothing; the 32-bit
  // instruction simply uses the low half of the 64-bit register
  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
3638
3639
3640 // Comparison Operands
3641 // NOTE: Label is a predefined operand which should not be redefined in
3642 // the AD file. It is generically handled within the ADLC.
3643
3644 //----------Conditional Branch Operands----------------------------------------
3645 // Comparison Op - This is the operation of the comparison, and is limited to
3646 // the following set of codes:
3647 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
3648 //
3649 // Other attributes of the comparison, such as unsignedness, are specified
3650 // by the comparison instruction that sets a condition code flags register.
3651 // That result is represented by a flags operand whose subtype is appropriate
3652 // to the unsignedness (etc.) of the comparison.
3653 //
3654 // Later, the instruction which matches both the Comparison Op (a Bool) and
3655 // the flags (produced by the Cmp) specifies the coding of the comparison op
3656 // by matching a specific subtype of Bool operand below, such as cmpOpU.
3657
3658
// used for signed integral comparisons and fp comparisons
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}

  // the values in the interface derive from struct BoolTest::mask
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gt");
    overflow(0x2, "overflow");
    less(0x3, "lt");
    not_equal(0x4, "ne");
    less_equal(0x5, "le");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "ge");
  %}
%}
3678
// used for unsigned integral comparisons
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // the values in the interface derive from struct BoolTest::mask
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}
3697
// used for certain integral comparisons which can be
// converted to bxx instructions
// restricted to eq/ne tests by the predicate
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gt");
    overflow(0x2, "overflow");
    less(0x3, "lt");
    not_equal(0x4, "ne");
    less_equal(0x5, "le");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "ge");
  %}
%}

// unsigned comparison restricted to lt/ge tests by the predicate
operand cmpOpULtGe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::lt ||
            n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}

// unsigned comparison restricted to eq/ne/le/gt tests by the predicate
operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}
3761
3762
3763 // Flags register, used as output of compare logic
3764 operand rFlagsReg()
3765 %{
3766 constraint(ALLOC_IN_RC(reg_flags));
3767 match(RegFlags);
3768
3769 op_cost(0);
3770 format %{ "RFLAGS" %}
3771 interface(REG_INTER);
3772 %}
3773
3774 // Special Registers
3775
3776 // Method Register
3777 operand inline_cache_RegP(iRegP reg)
3778 %{
3779 constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
3780 match(reg);
3781 match(iRegPNoSp);
3782 op_cost(0);
3783 format %{ %}
3784 interface(REG_INTER);
3785 %}
3786
3787 //----------OPERAND CLASSES----------------------------------------------------
3788 // Operand Classes are groups of operands that are used as to simplify
3789 // instruction definitions by not requiring the AD writer to specify
3790 // separate instructions for every form of operand when the
3791 // instruction accepts multiple operand types with the same basic
3792 // encoding and format. The classic case of this is memory operands.
3793
3794 // memory is used to define read/write location for load/store
3795 // instruction defs. we can turn a memory op into an Address
3796
opclass memory(indirect, indOffI, indOffL, indirectN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually an addiw $dst, $src, 0) and the downstream instructions
// consume the result of the L2I as an iRegI input. That's a shame since
// the addiw is actually redundant but it's not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
opclass iRegIorL(iRegI, iRegL);
opclass iRegNorP(iRegN, iRegP);
opclass iRegILNP(iRegI, iRegL, iRegN, iRegP);
opclass iRegILNPNoSp(iRegINoSp, iRegLNoSp, iRegNNoSp, iRegPNoSp);
opclass immIorL(immI, immL);
3818
3819 //----------PIPELINE-----------------------------------------------------------
3820 // Rules which define the behavior of the target architectures pipeline.
3821
3822 // For specific pipelines, e.g. generic RISC-V, define the stages of that pipeline
3823 //pipe_desc(ID, EX, MEM, WR);
// Map symbolic stage names onto the generic pipe_desc stages below.
#define ID S0
#define EX S1
#define MEM S2
#define WR S3
3828
3829 // Integer ALU reg operation
3830 pipeline %{
3831
attributes %{
  // RISC-V instructions are of length 2 or 4 bytes.
  variable_size_instructions;
  instruction_unit_size = 2;

  // Up to 4 instructions per bundle
  max_instructions_per_bundle = 4;

  // The RISC-V processor fetches 64 bytes...
  instruction_fetch_unit_size = 64;

  // ...in one line.
  instruction_fetch_units = 1;
%}
3846
3847 // We don't use an actual pipeline model so don't care about resources
3848 // or description. we do use pipeline classes to introduce fixed
3849 // latencies
3850
3851 //----------RESOURCES----------------------------------------------------------
3852 // Resources are the functional units available to the machine
3853
3854 // Generic RISC-V pipeline
3855 // 1 decoder
3856 // 1 instruction decoded per cycle
3857 // 1 load/store ops per cycle, 1 branch, 1 FPU
3858 // 1 mul, 1 div
3859
// Functional units of the generic pipeline model.
resources ( DECODE,
            ALU,
            MUL,
            DIV,
            BRANCH,
            LDST,
            FPU);
3867
3868 //----------PIPELINE DESCRIPTION-----------------------------------------------
3869 // Pipeline Description specifies the stages in the machine's pipeline
3870
// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5); // stages aliased as ID/EX/MEM/WR by the #defines earlier in this file
3873
3874 //----------PIPELINE CLASSES---------------------------------------------------
3875 // Pipeline Classes describe the stages in which input and output are
3876 // referenced by the hardware pipeline.
3877
// Single-precision FP two-operand ALU operation
pipe_class fp_dop_reg_reg_s(fRegF dst, fRegF src1, fRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}
3887
// Double-precision FP two-operand ALU operation.
// Fix: restore the missing single_instruction directive — every other
// pipe_class in this pipeline (including the single-precision twin
// fp_dop_reg_reg_s) declares it, and its absence here was inconsistent.
pipe_class fp_dop_reg_reg_d(fRegD dst, fRegD src1, fRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}
3896
// Single-precision FP unary operation
pipe_class fp_uop_s(fRegF dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-precision FP unary operation
pipe_class fp_uop_d(fRegD dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-to-float conversion
pipe_class fp_d2f(fRegF dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Float-to-double conversion
pipe_class fp_f2d(fRegD dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}
3932
// Float-to-int conversion
pipe_class fp_f2i(iRegINoSp dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Float-to-long conversion
pipe_class fp_f2l(iRegLNoSp dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Int-to-float conversion
pipe_class fp_i2f(fRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Long-to-float conversion
pipe_class fp_l2f(fRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-to-int conversion
pipe_class fp_d2i(iRegINoSp dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-to-long conversion
pipe_class fp_d2l(iRegLNoSp dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Int-to-double conversion
pipe_class fp_i2d(fRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Long-to-double conversion
// NOTE(review): src is iRegIorL2I here while fp_l2f uses iRegL — confirm
// this asymmetry is intentional.
pipe_class fp_l2d(fRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}
4004
// Single-precision FP division
pipe_class fp_div_s(fRegF dst, fRegF src1, fRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-precision FP division
pipe_class fp_div_d(fRegD dst, fRegD src1, fRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Single-precision FP square root
pipe_class fp_sqrt_s(fRegF dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Double-precision FP square root
pipe_class fp_sqrt_d(fRegD dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}
4042
// Load a single-precision FP constant
pipe_class fp_load_constant_s(fRegF dst)
%{
  single_instruction;
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Load a double-precision FP constant
pipe_class fp_load_constant_d(fRegD dst)
%{
  single_instruction;
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// Single-precision FP load from memory
pipe_class fp_load_mem_s(fRegF dst, memory mem)
%{
  single_instruction;
  mem    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

// Double-precision FP load from memory
pipe_class fp_load_mem_d(fRegD dst, memory mem)
%{
  single_instruction;
  mem    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

// Single-precision FP store to memory
pipe_class fp_store_reg_s(fRegF src, memory mem)
%{
  single_instruction;
  src    : S1(read);
  mem    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

// Double-precision FP store to memory
pipe_class fp_store_reg_d(fRegD src, memory mem)
%{
  single_instruction;
  src    : S1(read);
  mem    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}
4094
4095 //------- Integer ALU operations --------------------------
4096
// Integer ALU reg-reg operation
// Operands needed in ID, result generated in EX
// E.g. ADD   Rd, Rs1, Rs2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg operation with constant shift
// E.g. SLLI    Rd, Rs1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg-reg operation with variable shift
// both operands must be available in ID
// E.g. SLL   Rd, Rs1, Rs2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg operation
// E.g. NEG   Rd, Rs2
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX(write);
  src    : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg immediate operation
// E.g. ADDI    Rd, Rs1, #imm
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU immediate operation (no source operands)
// E.g. LI    Rd, #imm
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX(write);
  DECODE : ID;
  ALU    : EX;
%}
4165
//------- Multiply pipeline operations --------------------

// Multiply reg-reg
// E.g. MULW   Rd, Rs1, Rs2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  MUL    : WR;
%}

// E.g. MUL   RD, Rs1, Rs2
pipe_class lmul_reg_reg(iRegL dst, iRegL src1, iRegL src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  MUL    : WR;
%}

//------- Divide pipeline operations --------------------

// E.g. DIVW   Rd, Rs1, Rs2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  DIV    : WR;
%}

// E.g. DIV   RD, Rs1, Rs2
pipe_class ldiv_reg_reg(iRegL dst, iRegL src1, iRegL src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  DIV    : WR;
%}
4217
//------- Load pipeline operations ------------------------
// Loads read their address operands in ID and occupy the LDST unit in MEM;
// destinations are written back in WR.

// Load - prefetch (no destination register)
// Eg. PREFETCH_W mem
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem : ID(read);
  DECODE : ID;
  LDST : MEM;
%}

// Load - reg, mem
// E.g. LA Rd, mem
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst : WR(write);
  mem : ID(read);
  DECODE : ID;
  LDST : MEM;
%}

// Load - reg, reg
// E.g. LD Rd, Rs
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst : WR(write);
  src : ID(read);
  DECODE : ID;
  LDST : MEM;
%}

//------- Store pipeline operations -----------------------
// Stores read the address in ID and the data register in EX; the LDST unit
// is busy in MEM. Zero stores (source is zr) use istore_mem with no data
// operand.

// Store - zr, mem
// E.g. SD zr, mem
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem : ID(read);
  DECODE : ID;
  LDST : MEM;
%}

// Store - reg, mem
// E.g. SD Rs, mem
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem : ID(read);
  src : EX(read);
  DECODE : ID;
  LDST : MEM;
%}

// Store - reg, reg
// E.g. SD Rs2, Rs1
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst : ID(read);
  src : EX(read);
  DECODE : ID;
  LDST : MEM;
%}
4285
//------- Control transfer pipeline operations ------------
// Branches resolve in EX; source registers (if any) are read in ID.

// Unconditional branch, no register operands
pipe_class pipe_branch()
%{
  single_instruction;
  DECODE : ID;
  BRANCH : EX;
%}

// Indirect branch through a register
pipe_class pipe_branch_reg(iRegI src)
%{
  single_instruction;
  src : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}

// Compare & Branch (two register sources)
// E.g. BEQ Rs1, Rs2, L
pipe_class pipe_cmp_branch(iRegI src1, iRegI src2)
%{
  single_instruction;
  src1 : ID(read);
  src2 : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}

// Compare-against-zero & Branch (single register source)
// E.g. BEQZ Rs, L
pipe_class pipe_cmpz_branch(iRegI src)
%{
  single_instruction;
  src : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}
4324
//------- Synchronisation operations ----------------------
// Any operation requiring serialization
// E.g. FENCE/Atomic Ops/Load Acquire/Store Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  DECODE : ID;
  LDST : MEM;
%}

// Pipeline class for multi-instruction, serializing sequences (e.g. the
// CAS expansions below). The instruction count and latency are coarse
// scheduling estimates.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  DECODE : ID;
  LDST : MEM;
%}

// The real do-nothing guy: emits no instructions at all.
pipe_class real_empty()
%{
  instruction_count(0);
%}

// Empty pipeline class: one instruction, zero modeled latency.
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
  MachNop = pipe_class_empty;
%}
4392 %}
4393 //----------INSTRUCTIONS-------------------------------------------------------
4394 //
4395 // match -- States which machine-independent subtree may be replaced
4396 // by this instruction.
4397 // ins_cost -- The estimated cost of this instruction is used by instruction
4398 // selection to identify a minimum cost tree of machine
4399 // instructions that matches a tree of machine-independent
4400 // instructions.
4401 // format -- A string providing the disassembly for this instruction.
4402 // The value of an instruction's operand may be inserted
4403 // by referring to it with a '$' prefix.
4404 // opcode -- Three instruction opcodes may be provided. These are referred
4405 // to within an encode class as $primary, $secondary, and $tertiary
// respectively. The primary opcode is commonly used to
4407 // indicate the type of machine instruction, while secondary
4408 // and tertiary are often used for prefix options or addressing
4409 // modes.
4410 // ins_encode -- A list of encode classes with parameters. The encode class
4411 // name must have been defined in an 'enc_class' specification
4412 // in the encode section of the architecture description.
4413
4414 // ============================================================================
4415 // Memory (Load/Store) Instructions
4416
4417 // Load Instructions
4418
4419 // Load Byte (8 bit signed)
4420 instruct loadB(iRegINoSp dst, memory mem)
4421 %{
4422 match(Set dst (LoadB mem));
4423
4424 ins_cost(LOAD_COST);
4425 format %{ "lb $dst, $mem\t# byte, #@loadB" %}
4426
4427 ins_encode %{
4428 __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4429 %}
4430
4431 ins_pipe(iload_reg_mem);
4432 %}
4433
4434 // Load Byte (8 bit signed) into long
4435 instruct loadB2L(iRegLNoSp dst, memory mem)
4436 %{
4437 match(Set dst (ConvI2L (LoadB mem)));
4438
4439 ins_cost(LOAD_COST);
4440 format %{ "lb $dst, $mem\t# byte, #@loadB2L" %}
4441
4442 ins_encode %{
4443 __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4444 %}
4445
4446 ins_pipe(iload_reg_mem);
4447 %}
4448
4449 // Load Byte (8 bit unsigned)
4450 instruct loadUB(iRegINoSp dst, memory mem)
4451 %{
4452 match(Set dst (LoadUB mem));
4453
4454 ins_cost(LOAD_COST);
4455 format %{ "lbu $dst, $mem\t# byte, #@loadUB" %}
4456
4457 ins_encode %{
4458 __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4459 %}
4460
4461 ins_pipe(iload_reg_mem);
4462 %}
4463
4464 // Load Byte (8 bit unsigned) into long
4465 instruct loadUB2L(iRegLNoSp dst, memory mem)
4466 %{
4467 match(Set dst (ConvI2L (LoadUB mem)));
4468
4469 ins_cost(LOAD_COST);
4470 format %{ "lbu $dst, $mem\t# byte, #@loadUB2L" %}
4471
4472 ins_encode %{
4473 __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4474 %}
4475
4476 ins_pipe(iload_reg_mem);
4477 %}
4478
4479 // Load Short (16 bit signed)
4480 instruct loadS(iRegINoSp dst, memory mem)
4481 %{
4482 match(Set dst (LoadS mem));
4483
4484 ins_cost(LOAD_COST);
4485 format %{ "lh $dst, $mem\t# short, #@loadS" %}
4486
4487 ins_encode %{
4488 __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4489 %}
4490
4491 ins_pipe(iload_reg_mem);
4492 %}
4493
4494 // Load Short (16 bit signed) into long
4495 instruct loadS2L(iRegLNoSp dst, memory mem)
4496 %{
4497 match(Set dst (ConvI2L (LoadS mem)));
4498
4499 ins_cost(LOAD_COST);
4500 format %{ "lh $dst, $mem\t# short, #@loadS2L" %}
4501
4502 ins_encode %{
4503 __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4504 %}
4505
4506 ins_pipe(iload_reg_mem);
4507 %}
4508
4509 // Load Char (16 bit unsigned)
4510 instruct loadUS(iRegINoSp dst, memory mem)
4511 %{
4512 match(Set dst (LoadUS mem));
4513
4514 ins_cost(LOAD_COST);
4515 format %{ "lhu $dst, $mem\t# short, #@loadUS" %}
4516
4517 ins_encode %{
4518 __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4519 %}
4520
4521 ins_pipe(iload_reg_mem);
4522 %}
4523
4524 // Load Short/Char (16 bit unsigned) into long
4525 instruct loadUS2L(iRegLNoSp dst, memory mem)
4526 %{
4527 match(Set dst (ConvI2L (LoadUS mem)));
4528
4529 ins_cost(LOAD_COST);
4530 format %{ "lhu $dst, $mem\t# short, #@loadUS2L" %}
4531
4532 ins_encode %{
4533 __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4534 %}
4535
4536 ins_pipe(iload_reg_mem);
4537 %}
4538
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(LOAD_COST);
  format %{ "lw $dst, $mem\t# int, #@loadI" %}

  ins_encode %{
    __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit signed) into long
// lw sign-extends to 64 bits, so the ConvI2L is free.
instruct loadI2L(iRegLNoSp dst, memory mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(LOAD_COST);
  format %{ "lw $dst, $mem\t# int, #@loadI2L" %}

  ins_encode %{
    __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Integer (32 bit unsigned) into long
// Matches (int-load & 0xFFFFFFFF): lwu zero-extends, making the mask free.
instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(LOAD_COST);
  format %{ "lwu $dst, $mem\t# int, #@loadUI2L" %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}
4583
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(LOAD_COST);
  // Fixed: the disassembly comment previously said "# int" for this
  // 64-bit long load; label it "# long" like the other typed loads.
  format %{ "ld $dst, $mem\t# long, #@loadL" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}
4598
// Load Range
// Array length is an unsigned 32-bit value, hence lwu.
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(LOAD_COST);
  format %{ "lwu $dst, $mem\t# range, #@loadRange" %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Pointer
// Only matches when no GC barrier is attached; barrier-carrying loads are
// handled by GC-specific rules elsewhere.
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(LOAD_COST);
  format %{ "ld $dst, $mem\t# ptr, #@loadP" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory mem)
%{
  predicate(n->as_Load()->barrier_data() == 0);
  match(Set dst (LoadN mem));

  ins_cost(LOAD_COST);
  format %{ "lwu $dst, $mem\t# compressed ptr, #@loadN" %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadKlass mem));

  ins_cost(LOAD_COST);
  format %{ "ld $dst, $mem\t# class, #@loadKlass" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer (legacy object header layout)
instruct loadNKlass(iRegNNoSp dst, memory mem)
%{
  predicate(!UseCompactObjectHeaders);
  match(Set dst (LoadNKlass mem));

  ins_cost(LOAD_COST);
  format %{ "lwu $dst, $mem\t# compressed class ptr, #@loadNKlass" %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(iload_reg_mem);
%}

// Load Narrow Klass Pointer with compact object headers: the narrow klass
// lives shifted inside the mark word, so shift it down after the load.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory mem)
%{
  predicate(UseCompactObjectHeaders);
  match(Set dst (LoadNKlass mem));

  ins_cost(LOAD_COST);
  format %{
    "lwu $dst, $mem\t# compressed klass ptr, shifted\n\t"
    "srli $dst, $dst, markWord::klass_shift_at_offset"
  %}

  ins_encode %{
    __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
    __ srli(as_Register($dst$$reg), as_Register($dst$$reg), (unsigned) markWord::klass_shift_at_offset);
  %}

  ins_pipe(iload_reg_mem);
%}
4695
// Load Float
instruct loadF(fRegF dst, memory mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(LOAD_COST);
  format %{ "flw $dst, $mem\t# float, #@loadF" %}

  ins_encode %{
    __ flw(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_load_mem_s);
%}

// Load Double
instruct loadD(fRegD dst, memory mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(LOAD_COST);
  format %{ "fld $dst, $mem\t# double, #@loadD" %}

  ins_encode %{
    __ fld(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_load_mem_d);
%}
4725
// Constant materialization. The riscv_enc_mov_* encode classes (defined in
// the encode section of this file) expand to the appropriate li/lui/shift
// sequences; the multi-instruction cases carry a scaled ALU_COST.

// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# int, #@loadConI" %}

  ins_encode(riscv_enc_mov_imm(dst, src));

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# long, #@loadConL" %}

  ins_encode(riscv_enc_mov_imm(dst, src));

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# ptr, #@loadConP" %}

  ins_encode(riscv_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# null pointer, #@loadConP0" %}

  ins_encode(riscv_enc_mov_zero(dst));

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant One
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# load ptr constant one, #@loadConP1" %}

  ins_encode(riscv_enc_mov_p1(dst));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(ALU_COST * 4);
  format %{ "mv $dst, $con\t# compressed ptr, #@loadConN" %}

  ins_encode(riscv_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# compressed null pointer, #@loadConN0" %}

  ins_encode(riscv_enc_mov_zero(dst));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(ALU_COST * 6);
  format %{ "mv $dst, $con\t# compressed klass ptr, #@loadConNKlass" %}

  ins_encode(riscv_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
4829
// Floating-point constant materialization. When the Zfa fli.* immediate-load
// can encode the value it is used directly; otherwise the value is fetched
// from the constant table. Zero constants are moved from the zero register.

// Load Half Float Constant
instruct loadConH(fRegF dst, immH con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "flh $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConH"
  %}

  ins_encode %{
    assert(UseZfh || UseZfhmin, "must");
    if (MacroAssembler::can_hf_imm_load($con$$constant)) {
      __ fli_h(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ flh(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Half Float zero: transfer zr into the FP register.
instruct loadConH0(fRegF dst, immH0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.h.x $dst, zr\t# float, #@loadConH0" %}

  ins_encode %{
    assert(UseZfh || UseZfhmin, "must");
    __ fmv_h_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Float Constant
instruct loadConF(fRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "flw $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConF"
  %}

  ins_encode %{
    if (MacroAssembler::can_fp_imm_load($con$$constant)) {
      __ fli_s(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Float zero: transfer zr into the FP register.
instruct loadConF0(fRegF dst, immF0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.w.x $dst, zr\t# float, #@loadConF0" %}

  ins_encode %{
    __ fmv_w_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Double Constant
instruct loadConD(fRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "fld $dst, [$constantaddress]\t# load from constant table: double=$con, #@loadConD"
  %}

  ins_encode %{
    if (MacroAssembler::can_dp_imm_load($con$$constant)) {
      __ fli_d(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_d);
%}

// Load Double zero: transfer zr into the FP register.
instruct loadConD0(fRegD dst, immD0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.d.x $dst, zr\t# double, #@loadConD0" %}

  ins_encode %{
    __ fmv_d_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_d);
%}
4933
// Store Instructions. The storeimm*0 variants match stores of a zero
// constant and write the zero register directly, saving a constant load.

// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(STORE_COST);
  format %{ "sb $src, $mem\t# byte, #@storeB" %}

  ins_encode %{
    __ sb(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}

// Store zero Byte
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(STORE_COST);
  format %{ "sb zr, $mem\t# byte, #@storeimmB0" %}

  ins_encode %{
    __ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}

// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(STORE_COST);
  format %{ "sh $src, $mem\t# short, #@storeC" %}

  ins_encode %{
    __ sh(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}

// Store zero Char/Short
instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(STORE_COST);
  format %{ "sh zr, $mem\t# short, #@storeimmC0" %}

  ins_encode %{
    __ sh(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
4991
// Store Integer
instruct storeI(iRegIorL2I src, memory mem)
%{
  // Consistency: add the space after the first `mem` to match the
  // `match(Set mem (StoreX mem src))` form used by every sibling rule.
  match(Set mem (StoreI mem src));

  ins_cost(STORE_COST);
  format %{ "sw $src, $mem\t# int, #@storeI" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
5006
// Store zero Integer: writes the zero register, saving a constant load.
instruct storeimmI0(immI0 zero, memory mem)
%{
  // Consistency: add the space after the first `mem` to match the
  // `match(Set mem (StoreX mem zero))` form used by every sibling rule.
  match(Set mem (StoreI mem zero));

  ins_cost(STORE_COST);
  format %{ "sw zr, $mem\t# int, #@storeimmI0" %}

  ins_encode %{
    __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
5020
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(STORE_COST);
  format %{ "sd $src, $mem\t# long, #@storeL" %}

  ins_encode %{
    __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}

// Store zero Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));

  ins_cost(STORE_COST);
  format %{ "sd zr, $mem\t# long, #@storeimmL0" %}

  ins_encode %{
    __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}

// Store Pointer
// Only matches when no GC barrier is attached; barrier-carrying stores are
// handled by GC-specific rules elsewhere.
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(STORE_COST);
  format %{ "sd $src, $mem\t# ptr, #@storeP" %}

  ins_encode %{
    __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}

// Store null Pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(STORE_COST);
  format %{ "sd zr, $mem\t# ptr, #@storeimmP0" %}

  ins_encode %{
    __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}

// Store Compressed Pointer
instruct storeN(iRegN src, memory mem)
%{
  predicate(n->as_Store()->barrier_data() == 0);
  match(Set mem (StoreN mem src));

  ins_cost(STORE_COST);
  format %{ "sw $src, $mem\t# compressed ptr, #@storeN" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
5098
// Store Compressed null Pointer: writes the zero register directly.
instruct storeImmN0(immN0 zero, memory mem)
%{
  predicate(n->as_Store()->barrier_data() == 0);
  match(Set mem (StoreN mem zero));

  ins_cost(STORE_COST);
  format %{ "sw zr, $mem\t# compressed ptr, #@storeImmN0" %}

  ins_encode %{
    __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  // Consistency: this rule has no source register (it stores zr), so use
  // istore_mem like the other zero-store rules (storeimmB0/C0/I0/L0/P0)
  // rather than istore_reg_mem, which models an EX-stage data-register read.
  ins_pipe(istore_mem);
%}
5113
// Store Float
instruct storeF(fRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(STORE_COST);
  format %{ "fsw $src, $mem\t# float, #@storeF" %}

  ins_encode %{
    __ fsw(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_store_reg_s);
%}

// Store Double
instruct storeD(fRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(STORE_COST);
  format %{ "fsd $src, $mem\t# double, #@storeD" %}

  ins_encode %{
    __ fsd(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_store_reg_d);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(STORE_COST);
  format %{ "sw $src, $mem\t# compressed klass ptr, #@storeNKlass" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
5158
5159 // ============================================================================
5160 // Prefetch instructions
5161 // Must be safe to execute with invalid address (cannot fault).
5162
5163 instruct prefetchalloc( memory mem ) %{
5164 predicate(UseZicbop);
5165 match(PrefetchAllocation mem);
5166
5167 ins_cost(ALU_COST * 1);
5168 format %{ "prefetch_w $mem\t# Prefetch for write" %}
5169
5170 ins_encode %{
5171 if (Assembler::is_simm12($mem$$disp)) {
5172 if (($mem$$disp & 0x1f) == 0) {
5173 __ prefetch_w(as_Register($mem$$base), $mem$$disp);
5174 } else {
5175 __ addi(t0, as_Register($mem$$base), $mem$$disp);
5176 __ prefetch_w(t0, 0);
5177 }
5178 } else {
5179 __ mv(t0, $mem$$disp);
5180 __ add(t0, as_Register($mem$$base), t0);
5181 __ prefetch_w(t0, 0);
5182 }
5183 %}
5184
5185 ins_pipe(iload_prefetch);
5186 %}
5187
5188 // ============================================================================
5189 // Atomic operation instructions
5190 //
5191
5192 // standard CompareAndSwapX when we are using barriers
5193 // these have higher priority than the rules selected by a predicate
5194 instruct compareAndSwapB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
5195 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
5196 %{
5197 predicate(!UseZabha || !UseZacas);
5198
5199 match(Set res (CompareAndSwapB mem (Binary oldval newval)));
5200
5201 ins_cost(2 * VOLATILE_REF_COST);
5202
5203 effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
5204
5205 format %{
5206 "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
5207 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB_narrow"
5208 %}
5209
5210 ins_encode %{
5211 __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
5212 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
5213 true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
5214 %}
5215
5216 ins_pipe(pipe_slow);
5217 %}
5218
5219 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
5220 %{
5221 predicate(UseZabha && UseZacas);
5222
5223 match(Set res (CompareAndSwapB mem (Binary oldval newval)));
5224
5225 ins_cost(2 * VOLATILE_REF_COST);
5226
5227 format %{
5228 "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
5229 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB"
5230 %}
5231
5232 ins_encode %{
5233 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
5234 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
5235 true /* result as bool */);
5236 %}
5237
5238 ins_pipe(pipe_slow);
5239 %}
5240
5241 instruct compareAndSwapS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
5242 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
5243 %{
5244 predicate(!UseZabha || !UseZacas);
5245
5246 match(Set res (CompareAndSwapS mem (Binary oldval newval)));
5247
5248 ins_cost(2 * VOLATILE_REF_COST);
5249
5250 effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
5251
5252 format %{
5253 "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
5254 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS_narrow"
5255 %}
5256
5257 ins_encode %{
5258 __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
5259 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
5260 true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
5261 %}
5262
5263 ins_pipe(pipe_slow);
5264 %}
5265
5266 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
5267 %{
5268 predicate(UseZabha && UseZacas);
5269
5270 match(Set res (CompareAndSwapS mem (Binary oldval newval)));
5271
5272 ins_cost(2 * VOLATILE_REF_COST);
5273
5274 format %{
5275 "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
5276 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS"
5277 %}
5278
5279 ins_encode %{
5280 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
5281 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
5282 true /* result as bool */);
5283 %}
5284
5285 ins_pipe(pipe_slow);
5286 %}
5287
5288 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
5289 %{
5290 match(Set res (CompareAndSwapI mem (Binary oldval newval)));
5291
5292 ins_cost(2 * VOLATILE_REF_COST);
5293
5294 format %{
5295 "cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
5296 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapI"
5297 %}
5298
5299 ins_encode %{
5300 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
5301 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
5302 /*result as bool*/ true);
5303 %}
5304
5305 ins_pipe(pipe_slow);
5306 %}
5307
5308 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
5309 %{
5310 match(Set res (CompareAndSwapL mem (Binary oldval newval)));
5311
5312 ins_cost(2 * VOLATILE_REF_COST);
5313
5314 format %{
5315 "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
5316 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL"
5317 %}
5318
5319 ins_encode %{
5320 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
5321 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
5322 /*result as bool*/ true);
5323 %}
5324
5325 ins_pipe(pipe_slow);
5326 %}
5327
5328 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
5329 %{
5330 predicate(n->as_LoadStore()->barrier_data() == 0);
5331
5332 match(Set res (CompareAndSwapP mem (Binary oldval newval)));
5333
5334 ins_cost(2 * VOLATILE_REF_COST);
5335
5336 format %{
5337 "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
5338 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP"
5339 %}
5340
5341 ins_encode %{
5342 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
5343 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
5344 /*result as bool*/ true);
5345 %}
5346
5347 ins_pipe(pipe_slow);
5348 %}
5349
// Strong narrow-oop (32-bit compressed pointer) CAS; res <- 1/0 success flag.
// uint32: compare the narrow oop zero-extended, matching how it is stored.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5371
5372 // alternative CompareAndSwapX when we are eliding barriers
// Acquiring byte CAS emulated with a word-sized LR/SC sequence when the
// hardware lacks sub-word AMO support (no Zabha+Zacas). oldval/newval are
// pinned to x12/x13 and killed; three temps carry the shift/mask state.
instruct compareAndSwapBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                   iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                            Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
                            true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5397
// Acquiring byte CAS using native sub-word atomics (requires Zabha + Zacas).
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
               Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
               true /* result as bool */);
  %}

  ins_pipe(pipe_slow);
%}
5419
// Acquiring short CAS emulated via word-sized LR/SC (no Zabha+Zacas);
// same register discipline as the byte variant (x12/x13 killed, three temps).
instruct compareAndSwapSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                   iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                            Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
                            true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5444
// Acquiring short CAS using native sub-word atomics (requires Zabha + Zacas).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
               Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
               true /* result as bool */);
  %}

  ins_pipe(pipe_slow);
%}
5466
// Acquiring int CAS: as compareAndSwapI but with the aq bit set, selected
// when the node needs acquire semantics (needs_acquiring_load_reserved).
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5488
// Acquiring long CAS (aq ordering); res <- 1/0 success flag.
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5510
// Acquiring pointer CAS; only for nodes carrying no GC barrier data.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5532
// Acquiring narrow-oop CAS; only for nodes carrying no GC barrier data.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5554
5555 // Sundry CAS operations. Note that release is always true,
5556 // regardless of the memory ordering of the CAS. This is because we
5557 // need the volatile case to be sequentially consistent but there is
5558 // no trailing StoreLoad barrier emitted by C2. Unfortunately we
5559 // can't check the type of memory ordering here, so we always emit a
5560 // sc_d(w) with rl bit set.
// Byte compare-and-exchange (returns the previous value, not a bool) emulated
// via word-sized LR/SC when Zabha+Zacas are unavailable. TEMP_DEF res keeps
// the result register disjoint from the killed x12/x13 inputs.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                            /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                            /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5584
// Byte compare-and-exchange using native sub-word atomics (Zabha + Zacas);
// returns the previous memory value in res.
// Fix: declare TEMP_DEF res so the allocator never aliases res with
// oldval/newval -- cmpxchg writes into res before the inputs are fully
// consumed, as the _narrow sibling already documents via its effect list.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5604
// Short compare-and-exchange (returns previous value) emulated via word-sized
// LR/SC when Zabha+Zacas are unavailable; x12/x13 inputs are killed.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                            /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                            /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5628
// Short compare-and-exchange using native sub-word atomics (Zabha + Zacas).
// Fix: declare TEMP_DEF res so res is never allocated to the same register as
// oldval/newval (cmpxchg writes the loaded value into res), matching the
// effect list of the _narrow sibling.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5648
// Int compare-and-exchange: returns the previous memory value in res.
// Fix: declare TEMP_DEF res. The LR/SC loop loads into res and then compares
// it with oldval before storing newval; if res aliased either input the
// comparison/store would be corrupted. The _narrow variants already carry
// TEMP_DEF res for the same reason.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5666
// Long compare-and-exchange: returns the previous memory value in res.
// Fix: declare TEMP_DEF res so the allocator keeps res disjoint from
// oldval/newval (cmpxchg writes the loaded value into res mid-sequence).
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5684
// Narrow-oop compare-and-exchange: returns the previous value in res.
// Only for nodes with no GC barrier data.
// Fix: declare TEMP_DEF res so res cannot alias oldval/newval (cmpxchg
// writes the loaded value into res before the inputs are consumed).
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5704
// Pointer compare-and-exchange: returns the previous value in res.
// Only for nodes with no GC barrier data.
// Fix: declare TEMP_DEF res so res cannot alias oldval/newval (cmpxchg
// writes the loaded value into res before the inputs are consumed).
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5724
// Acquiring byte compare-and-exchange emulated via word-sized LR/SC
// (no Zabha+Zacas); returns the previous value, x12/x13 inputs killed.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                            /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                            /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5748
// Acquiring byte compare-and-exchange with native sub-word atomics
// (Zabha + Zacas); returns the previous memory value in res.
// Fix: declare TEMP_DEF res so res cannot alias oldval/newval, matching the
// effect list of the _narrow sibling.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5768
// Acquiring short compare-and-exchange emulated via word-sized LR/SC
// (no Zabha+Zacas); returns the previous value, x12/x13 inputs killed.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                            /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                            /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5792
// Acquiring short compare-and-exchange with native sub-word atomics
// (Zabha + Zacas); returns the previous memory value in res.
// Fix: declare TEMP_DEF res so res cannot alias oldval/newval, matching the
// effect list of the _narrow sibling.
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5812
// Acquiring int compare-and-exchange: returns the previous value in res.
// Fix: declare TEMP_DEF res so res cannot alias oldval/newval (cmpxchg
// writes the loaded value into res before the inputs are consumed).
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5832
// Acquiring long compare-and-exchange: returns the previous value in res.
// Fix: declare TEMP_DEF res so res cannot alias oldval/newval (cmpxchg
// writes the loaded value into res before the inputs are consumed).
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5852
// Acquiring narrow-oop compare-and-exchange (no GC barrier data only):
// returns the previous value in res.
// Fix: declare TEMP_DEF res so res cannot alias oldval/newval (cmpxchg
// writes the loaded value into res before the inputs are consumed).
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5872
// Acquiring pointer compare-and-exchange (no GC barrier data only):
// returns the previous value in res.
// Fix: declare TEMP_DEF res so res cannot alias oldval/newval (cmpxchg
// writes the loaded value into res before the inputs are consumed).
// NOTE(review): "weak" in the format text is misleading -- this is a strong CAS.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5892
// Weak byte CAS (single LR/SC attempt, may fail spuriously) emulated via a
// word-sized sequence when Zabha+Zacas are unavailable; res <- 1 on success.
instruct weakCompareAndSwapB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapB_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                                 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5917
// Weak byte CAS using native sub-word atomics (Zabha + Zacas); res <- 1/0.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapB"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5938
// Weak short CAS emulated via word-sized LR/SC (no Zabha+Zacas); res <- 1/0.
instruct weakCompareAndSwapS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapS_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                                 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5963
// Weak short CAS using native sub-word atomics (Zabha + Zacas); res <- 1/0.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapS"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5984
// Weak int CAS: single attempt, may fail spuriously; res <- 1 on success.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapI"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6003
// Weak long CAS: single attempt, may fail spuriously; res <- 1 on success.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapL"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6022
// Weak narrow-oop CAS (no GC barrier data only); res <- 1 on success.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapN"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6043
// Weak pointer CAS (no GC barrier data only); res <- 1 on success.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapP"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6064
// Acquiring weak byte CAS emulated via word-sized LR/SC (no Zabha+Zacas);
// res <- 1 on success. x12/x13 inputs are killed, three temps required.
instruct weakCompareAndSwapBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapBAcq_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                                 /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6089
// Acquiring weak byte CAS with native sub-word atomics (Zabha + Zacas).
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapBAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6110
// Acquiring weak short CAS emulated via word-sized LR/SC (no Zabha+Zacas);
// res <- 1 on success. x12/x13 inputs are killed, three temps required.
instruct weakCompareAndSwapSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapSAcq_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                                 /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6135
// Acquiring weak short CAS with native sub-word atomics (Zabha + Zacas).
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapSAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6156
// Acquiring weak int CAS; res <- 1 on success.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapIAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6177
// Acquiring weak long CAS; res <- 1 on success.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapLAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6198
// Acquiring weak narrow-oop CAS (no GC barrier data only); res <- 1 on success.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapNAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6219
// Acquiring weak pointer CAS (no GC barrier data only); res <- 1 on success.
// NOTE(review): the leading "\t" on the second format line differs from the
// sibling rules (e.g. weakCompareAndSwapNAcq) -- cosmetic only.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "\t# $res == 1 when success, #@weakCompareAndSwapPAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6240
// Atomic int exchange: prev <- old *mem, *mem <- newv (relaxed ordering).
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev)
%{
  match(Set prev (GetAndSetI mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgw $prev, $newv, [$mem]\t#@get_and_setI" %}

  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6255
6256 instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev)
6257 %{
6258 match(Set prev (GetAndSetL mem newv));
6259
6260 ins_cost(ALU_COST);
6261
6262 format %{ "atomic_xchg $prev, $newv, [$mem]\t#@get_and_setL" %}
6263
6264 ins_encode %{
6265 __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
6266 %}
6267
6268 ins_pipe(pipe_serial);
6269 %}
6270
6271 instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev)
6272 %{
6273 predicate(n->as_LoadStore()->barrier_data() == 0);
6274
6275 match(Set prev (GetAndSetN mem newv));
6276
6277 ins_cost(ALU_COST);
6278
6279 format %{ "atomic_xchgwu $prev, $newv, [$mem]\t#@get_and_setN" %}
6280
6281 ins_encode %{
6282 __ atomic_xchgwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
6283 %}
6284
6285 ins_pipe(pipe_serial);
6286 %}
6287
6288 instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev)
6289 %{
6290 predicate(n->as_LoadStore()->barrier_data() == 0);
6291 match(Set prev (GetAndSetP mem newv));
6292
6293 ins_cost(ALU_COST);
6294
6295 format %{ "atomic_xchg $prev, $newv, [$mem]\t#@get_and_setP" %}
6296
6297 ins_encode %{
6298 __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
6299 %}
6300
6301 ins_pipe(pipe_serial);
6302 %}
6303
// ---- Atomic exchange (GetAndSet), acquire variants ----
// Matched when needs_acquiring_load_reserved(n) holds; the xchgal* forms
// carry acquire ordering in addition to the exchange.

// Acquiring exchange of a 32-bit int.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set prev (GetAndSetI mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]\t#@get_and_setIAcq" %}

  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring exchange of a 64-bit long.
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set prev (GetAndSetL mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg_acq $prev, $newv, [$mem]\t#@get_and_setLAcq" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring exchange of a narrow oop; only when no GC barrier is attached.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set prev (GetAndSetN mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgwu_acq $prev, $newv, [$mem]\t#@get_and_setNAcq" %}

  ins_encode %{
    __ atomic_xchgalwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring exchange of a full-width pointer; only when no GC barrier is
// attached.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set prev (GetAndSetP mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg_acq $prev, $newv, [$mem]\t#@get_and_setPAcq" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6371
// ---- Atomic fetch-and-add, 64-bit, relaxed ordering ----
// newval receives the value *before* the add (GetAndAddL semantics).
// The *_no_res variants are matched when the old value is unused and
// pass noreg so no result register is written.  The *i variants take
// an addi-encodable immediate (immLAdd) instead of a register.

// Fetch-and-add long, register increment.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr)
%{
  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addL" %}

  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Fetch-and-add long, result unused.
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL [$mem], $incr\t#@get_and_addL_no_res" %}

  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Fetch-and-add long, immediate increment.
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAdd incr)
%{
  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addLi" %}

  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Fetch-and-add long, immediate increment, result unused.
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL [$mem], $incr\t#@get_and_addLi_no_res" %}

  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6435
// ---- Atomic fetch-and-add, 32-bit, relaxed ordering ----
// Same structure as the long variants above, using the 32-bit addw form.

// Fetch-and-add int, register increment.
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr)
%{
  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addI" %}

  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Fetch-and-add int, result unused.
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI [$mem], $incr\t#@get_and_addI_no_res" %}

  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Fetch-and-add int, immediate increment.
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAdd incr)
%{
  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addIi" %}

  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Fetch-and-add int, immediate increment, result unused.
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI [$mem], $incr\t#@get_and_addIi_no_res" %}

  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6499
// ---- Atomic fetch-and-add, 64-bit, acquire variants ----
// Matched when needs_acquiring_load_reserved(n) holds; atomic_addal
// carries acquire ordering.

// Acquiring fetch-and-add long, register increment.
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLAcq" %}

  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add long, result unused.
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addL_no_resAcq" %}

  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add long, immediate increment.
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAdd incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLiAcq" %}

  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add long, immediate increment, result unused.
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addLi_no_resAcq" %}

  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6566
// ---- Atomic fetch-and-add, 32-bit, acquire variants ----

// Acquiring fetch-and-add int, register increment.
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIAcq" %}

  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add int, result unused.
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addI_no_resAcq" %}

  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add int, immediate increment.
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAdd incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIiAcq" %}

  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Acquiring fetch-and-add int, immediate increment, result unused.
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addIi_no_resAcq" %}

  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6634
6635 // ============================================================================
6636 // Arithmetic Instructions
6637 //
6638
6639 // Integer Addition
6640
6641 // TODO
6642 // these currently employ operations which do not set CR and hence are
6643 // not flagged as killing CR but we would like to isolate the cases
6644 // where we want to set flags from those where we don't. need to work
6645 // out how to do that.
// 32-bit integer add, register + register.  addw sign-extends the 32-bit
// result into the upper half of dst, as required for TypeInt values.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addw  $dst, $src1, $src2\t#@addI_reg_reg" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit integer add, register + 12-bit immediate (immIAdd guarantees the
// constant is addiw-encodable).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAdd src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm" %}

  ins_encode %{
    __ addiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}

// Same as above but the 32-bit source comes from a ConvL2I; the truncation
// is free since addiw only reads the low 32 bits.
instruct addI_reg_imm_l2i(iRegINoSp dst, iRegL src1, immIAdd src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm_l2i" %}

  ins_encode %{
    __ addiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6690
// Pointer Addition
// Pointer add, register + register offset.
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(ALU_COST);
  format %{ "add $dst, $src1, $src2\t# ptr, #@addP_reg_reg" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// If we shift more than 32 bits, we need not convert I2L.
// The low 32 bits of src are shifted out of range, so the (ConvI2L src)
// can be elided and a plain slli on the int register used instead.
instruct lShiftL_regI_immGE32(iRegLNoSp dst, iRegI src, uimmI6_ge32 scale) %{
  match(Set dst (LShiftL (ConvI2L src) scale));
  ins_cost(ALU_COST);
  format %{ "slli $dst, $src, $scale & 63\t#@lShiftL_regI_immGE32" %}

  ins_encode %{
    __ slli(as_Register($dst$$reg), as_Register($src$$reg), $scale$$constant & 63);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAdd src2) %{
  match(Set dst (AddP src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi $dst, $src1, $src2\t# ptr, #@addP_reg_imm" %}

  ins_encode %{
    // src2 is an addi-encodable immediate (immLAdd), so emit addi directly.
    __ addi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6736
// Long Addition
// 64-bit add, register + register.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(ALU_COST);
  format %{ "add $dst, $src1, $src2\t#@addL_reg_reg" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Addition.  No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi $dst, $src1, $src2\t#@addL_reg_imm" %}

  ins_encode %{
    // src2 is an addi-encodable immediate (immLAdd), so emit addi directly.
    __ addi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6767
// Integer Subtraction
// 32-bit subtract, register - register.  subw sign-extends the 32-bit
// result into the upper half of dst.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(ALU_COST);
  format %{ "subw  $dst, $src1, $src2\t#@subI_reg_reg" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immISub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, -$src2\t#@subI_reg_imm" %}

  ins_encode %{
    // subiw expands to addiw with the negated immediate (see format above);
    // immISub guarantees -src2 is addiw-encodable.
    __ subiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6800
// Long Subtraction
// 64-bit subtract, register - register.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(ALU_COST);
  format %{ "sub  $dst, $src1, $src2\t#@subL_reg_reg" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Long Immediate Subtraction.  No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLSub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi $dst, $src1, -$src2\t#@subL_reg_imm" %}

  ins_encode %{
    // subi expands to addi with the negated immediate (see format above);
    // immLSub guarantees -src2 is addi-encodable.
    __ subi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6831
// Integer Negation (special case for sub)

// 0 - src matched directly so no zero register materialization is needed.
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (SubI zero src));
  ins_cost(ALU_COST);
  format %{ "subw  $dst, x0, $src\t# int, #@negI_reg" %}

  ins_encode %{
    // negw expands to subw dst, x0, src (see format above).
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (SubL zero src));
  ins_cost(ALU_COST);
  format %{ "sub  $dst, x0, $src\t# long, #@negL_reg" %}

  ins_encode %{
    // neg expands to sub dst, x0, src (see format above).
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
6863
// Integer Multiply

// 32-bit multiply.  mulw computes the low 32 bits of the product and
// sign-extends that result into the high 32 bits of dst, which is the
// representation C2 expects for TypeInt values.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulw  $dst, $src1, $src2\t#@mulI" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

// 64-bit multiply, low 64 bits of the product.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mul  $dst, $src1, $src2\t#@mulL" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of the signed 128-bit product (mulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
%{
  match(Set dst (MulHiL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulh  $dst, $src1, $src2\t# mulhi, #@mulHiL_rReg" %}

  ins_encode %{
    __ mulh(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of the unsigned 128-bit product (mulhu).
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
%{
  match(Set dst (UMulHiL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulhu  $dst, $src1, $src2\t# umulhi, #@umulHiL_rReg" %}

  ins_encode %{
    __ mulhu(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
6927
// Integer Divide
// RISC-V division never traps: divide-by-zero and overflow produce defined
// results, so no explicit checks are emitted here.

// 32-bit signed divide.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "divw  $dst, $src1, $src2\t#@divI"%}

  ins_encode %{
    __ divw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(idiv_reg_reg);
%}

// 32-bit unsigned divide.
instruct UdivI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "divuw  $dst, $src1, $src2\t#@UdivI"%}

  ins_encode %{
    __ divuw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(idiv_reg_reg);
%}

// (src1 >> 31) >>> 31 extracts the sign bit of a 32-bit value, which is a
// single logical shift right by 31.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(ALU_COST);
  format %{ "srliw $dst, $src1, $div1\t# int signExtract, #@signExtract" %}

  ins_encode %{
    __ srliw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
6962
// Long Divide

// 64-bit signed divide.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "div  $dst, $src1, $src2\t#@divL" %}

  ins_encode %{
    __ div(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ldiv_reg_reg);
%}

// 64-bit unsigned divide.
instruct UdivL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));
  ins_cost(IDIVDI_COST);

  format %{ "divu  $dst, $src1, $src2\t#@UdivL" %}

  ins_encode %{
    __ divu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ldiv_reg_reg);
%}

// (src1 >> 63) >>> 63 extracts the sign bit of a 64-bit value, which is a
// single logical shift right by 63.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(ALU_COST);
  format %{ "srli $dst, $src1, $div1\t# long signExtract, #@signExtractL" %}

  ins_encode %{
    __ srli(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
6998
// Integer Remainder

// 32-bit signed remainder.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "remw  $dst, $src1, $src2\t#@modI" %}

  ins_encode %{
    __ remw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

// 32-bit unsigned remainder.
instruct UmodI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "remuw  $dst, $src1, $src2\t#@UmodI" %}

  ins_encode %{
    __ remuw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Long Remainder

// 64-bit signed remainder.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "rem  $dst, $src1, $src2\t#@modL" %}

  ins_encode %{
    __ rem(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

// 64-bit unsigned remainder.
instruct UmodL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "remu  $dst, $src1, $src2\t#@UmodL" %}

  ins_encode %{
    __ remu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}
7046
// Integer Shifts

// Shift Left Register
// Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sllw  $dst, $src1, $src2\t#@lShiftI_reg_reg" %}

  ins_encode %{
    __ sllw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "slliw  $dst, $src1, ($src2 & 0x1f)\t#@lShiftI_reg_imm" %}

  ins_encode %{
    // 32-bit shifts take a 5-bit shift amount; mask to stay in range.
    __ slliw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "srlw  $dst, $src1, $src2\t#@urShiftI_reg_reg" %}

  ins_encode %{
    __ srlw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "srliw  $dst, $src1, ($src2 & 0x1f)\t#@urShiftI_reg_imm" %}

  ins_encode %{
    // 32-bit shifts take a 5-bit shift amount; mask to stay in range.
    __ srliw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Arithmetic Register
// Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sraw  $dst, $src1, $src2\t#@rShiftI_reg_reg" %}

  ins_encode %{
    // sraw sign-extends the 32-bit result into the upper half of dst.
    __ sraw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sraiw  $dst, $src1, ($src2 & 0x1f)\t#@rShiftI_reg_imm" %}

  ins_encode %{
    // sraiw sign-extends the 32-bit result into the upper half of dst.
    __ sraiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7147
// Long Shifts

// Shift Left Register
// Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "sll  $dst, $src1, $src2\t#@lShiftL_reg_reg" %}

  ins_encode %{
    __ sll(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "slli  $dst, $src1, ($src2 & 0x3f)\t#@lShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ slli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Shift Right Logical Register
// Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srl  $dst, $src1, $src2\t#@urShiftL_reg_reg" %}

  ins_encode %{
    __ srl(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srli  $dst, $src1, ($src2 & 0x3f)\t#@urShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// A special-case pattern for card table stores.
// Matches the pointer reinterpreted as an integer (CastP2X) being shifted.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(ALU_COST);
  format %{ "srli  $dst, p2x($src1), ($src2 & 0x3f)\t#@urShiftP_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7237
// Shift Right Arithmetic Register
// Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "sra  $dst, $src1, $src2\t#@rShiftL_reg_reg" %}

  ins_encode %{
    __ sra(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srai  $dst, $src1, ($src2 & 0x3f)\t#@rShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srai(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Bitwise NOT of an int: xor with -1 (all ones) flips every bit.
instruct regI_not_reg(iRegINoSp dst, iRegI src1, immI_M1 m1) %{
  match(Set dst (XorI src1 m1));
  ins_cost(ALU_COST);
  format %{ "xori  $dst, $src1, -1\t#@regI_not_reg" %}

  ins_encode %{
    __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
  %}

  ins_pipe(ialu_reg_imm);
%}

// Bitwise NOT of a long: xor with -1 (all ones) flips every bit.
instruct regL_not_reg(iRegLNoSp dst, iRegL src1, immL_M1 m1) %{
  match(Set dst (XorL src1 m1));
  ins_cost(ALU_COST);
  format %{ "xori  $dst, $src1, -1\t#@regL_not_reg" %}

  ins_encode %{
    __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
  %}

  ins_pipe(ialu_reg_imm);
%}
7296
7297
7298 // ============================================================================
7299 // Floating Point Arithmetic Instructions
7300
7301 instruct addF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
7302 match(Set dst (AddF src1 src2));
7303
7304 ins_cost(DEFAULT_COST * 5);
7305 format %{ "fadd.s $dst, $src1, $src2\t#@addF_reg_reg" %}
7306
7307 ins_encode %{
7308 __ fadd_s(as_FloatRegister($dst$$reg),
7309 as_FloatRegister($src1$$reg),
7310 as_FloatRegister($src2$$reg));
7311 %}
7312
7313 ins_pipe(fp_dop_reg_reg_s);
7314 %}
7315
7316 instruct addD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
7317 match(Set dst (AddD src1 src2));
7318
7319 ins_cost(DEFAULT_COST * 5);
7320 format %{ "fadd.d $dst, $src1, $src2\t#@addD_reg_reg" %}
7321
7322 ins_encode %{
7323 __ fadd_d(as_FloatRegister($dst$$reg),
7324 as_FloatRegister($src1$$reg),
7325 as_FloatRegister($src2$$reg));
7326 %}
7327
7328 ins_pipe(fp_dop_reg_reg_d);
7329 %}
7330
7331 instruct subF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
7332 match(Set dst (SubF src1 src2));
7333
7334 ins_cost(DEFAULT_COST * 5);
7335 format %{ "fsub.s $dst, $src1, $src2\t#@subF_reg_reg" %}
7336
7337 ins_encode %{
7338 __ fsub_s(as_FloatRegister($dst$$reg),
7339 as_FloatRegister($src1$$reg),
7340 as_FloatRegister($src2$$reg));
7341 %}
7342
7343 ins_pipe(fp_dop_reg_reg_s);
7344 %}
7345
7346 instruct subD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
7347 match(Set dst (SubD src1 src2));
7348
7349 ins_cost(DEFAULT_COST * 5);
7350 format %{ "fsub.d $dst, $src1, $src2\t#@subD_reg_reg" %}
7351
7352 ins_encode %{
7353 __ fsub_d(as_FloatRegister($dst$$reg),
7354 as_FloatRegister($src1$$reg),
7355 as_FloatRegister($src2$$reg));
7356 %}
7357
7358 ins_pipe(fp_dop_reg_reg_d);
7359 %}
7360
7361 instruct mulF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
7362 match(Set dst (MulF src1 src2));
7363
7364 ins_cost(FMUL_SINGLE_COST);
7365 format %{ "fmul.s $dst, $src1, $src2\t#@mulF_reg_reg" %}
7366
7367 ins_encode %{
7368 __ fmul_s(as_FloatRegister($dst$$reg),
7369 as_FloatRegister($src1$$reg),
7370 as_FloatRegister($src2$$reg));
7371 %}
7372
7373 ins_pipe(fp_dop_reg_reg_s);
7374 %}
7375
7376 instruct mulD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
7377 match(Set dst (MulD src1 src2));
7378
7379 ins_cost(FMUL_DOUBLE_COST);
7380 format %{ "fmul.d $dst, $src1, $src2\t#@mulD_reg_reg" %}
7381
7382 ins_encode %{
7383 __ fmul_d(as_FloatRegister($dst$$reg),
7384 as_FloatRegister($src1$$reg),
7385 as_FloatRegister($src2$$reg));
7386 %}
7387
7388 ins_pipe(fp_dop_reg_reg_d);
7389 %}
7390
// Fused multiply-add family. These match Fma* ideal nodes, which are only
// generated when UseFMA is on (the asserts below double-check that).
// Note the Fma node operand order: (FmaF add_operand (Binary mul1 mul2)).

// src1 * src2 + src3
instruct maddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fmadd.s $dst, $src1, $src2, $src3\t#@maddF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadd_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fmadd.d $dst, $src1, $src2, $src3\t#@maddD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadd_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
instruct msubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fmsub.s $dst, $src1, $src2, $src3\t#@msubF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsub_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
instruct msubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fmsub.d $dst, $src1, $src2, $src3\t#@msubD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsub_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct nmsubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fnmsub.s $dst, $src1, $src2, $src3\t#@nmsubF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsub_s(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct nmsubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fnmsub.d $dst, $src1, $src2, $src3\t#@nmsubD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsub_d(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct nmaddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fnmadd.s $dst, $src1, $src2, $src3\t#@nmaddF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadd_s(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct nmaddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fnmadd.d $dst, $src1, $src2, $src3\t#@nmaddD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadd_d(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
7538
// Java Math.min/max semantics for float/double. Two variants per operation:
//  - the generic one (predicate !UseZfa) uses MacroAssembler::minmax_fp,
//    which needs a branch sequence and clobbers the flags (KILL cr);
//  - the Zfa one uses the single fminm/fmaxm instruction from the Zfa
//    extension, which already implements IEEE-754-2019 minimum/maximum.

// Math.max(FF)F
instruct maxF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MaxF src1 src2));
  effect(KILL cr);

  format %{ "maxF $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::single_precision, false /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

instruct maxF_reg_reg_zfa(fRegF dst, fRegF src1, fRegF src2) %{
  predicate(UseZfa);
  match(Set dst (MaxF src1 src2));

  format %{ "maxF $dst, $src1, $src2" %}

  ins_encode %{
    __ fmaxm_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.min(FF)F
instruct minF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MinF src1 src2));
  effect(KILL cr);

  format %{ "minF $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::single_precision, true /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

instruct minF_reg_reg_zfa(fRegF dst, fRegF src1, fRegF src2) %{
  predicate(UseZfa);
  match(Set dst (MinF src1 src2));

  format %{ "minF $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.max(DD)D
instruct maxD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MaxD src1 src2));
  effect(KILL cr);

  format %{ "maxD $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::double_precision, false /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

instruct maxD_reg_reg_zfa(fRegD dst, fRegD src1, fRegD src2) %{
  predicate(UseZfa);
  match(Set dst (MaxD src1 src2));

  format %{ "maxD $dst, $src1, $src2" %}

  ins_encode %{
    __ fmaxm_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.min(DD)D
instruct minD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MinD src1 src2));
  effect(KILL cr);

  format %{ "minD $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::double_precision, true /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

instruct minD_reg_reg_zfa(fRegD dst, fRegD src1, fRegD src2) %{
  predicate(UseZfa);
  match(Set dst (MinD src1 src2));

  format %{ "minD $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
7662
// Float/Double.isInfinite / isFinite intrinsics.
// Common pattern: fclass classifies the FP value into a bit mask in dst,
// andi keeps only the bits of interest (FClassBits::inf or ::finite),
// and slt(dst, zr, dst) normalizes "any bit set" to the boolean 0/1.

// Float.isInfinite
instruct isInfiniteF_reg_reg(iRegINoSp dst, fRegF src)
%{
  match(Set dst (IsInfiniteF src));

  format %{ "isInfinite $dst, $src" %}
  ins_encode %{
    __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::inf);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double.isInfinite
instruct isInfiniteD_reg_reg(iRegINoSp dst, fRegD src)
%{
  match(Set dst (IsInfiniteD src));

  format %{ "isInfinite $dst, $src" %}
  ins_encode %{
    __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::inf);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float.isFinite
instruct isFiniteF_reg_reg(iRegINoSp dst, fRegF src)
%{
  match(Set dst (IsFiniteF src));

  format %{ "isFinite $dst, $src" %}
  ins_encode %{
    __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::finite);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double.isFinite
instruct isFiniteD_reg_reg(iRegINoSp dst, fRegD src)
%{
  match(Set dst (IsFiniteD src));

  format %{ "isFinite $dst, $src" %}
  ins_encode %{
    __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::finite);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
7722
// Single-precision FP divide: dst = src1 / src2 (fdiv.s).
instruct divF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(FDIV_COST);
  format %{ "fdiv.s $dst, $src1, $src2\t#@divF_reg_reg" %}

  ins_encode %{
    __ fdiv_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision FP divide: dst = src1 / src2 (fdiv.d).
instruct divD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(FDIV_COST);
  format %{ "fdiv.d $dst, $src1, $src2\t#@divD_reg_reg" %}

  ins_encode %{
    __ fdiv_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
7752
// FP negate; fneg_s/fneg_d expand to fsgnjn with src for both operands
// (sign-injection of the inverted sign bit), as shown in the format string.
instruct negF_reg_reg(fRegF dst, fRegF src) %{
  match(Set dst (NegF src));

  ins_cost(XFER_COST);
  format %{ "fsgnjn.s $dst, $src, $src\t#@negF_reg_reg" %}

  ins_encode %{
    __ fneg_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

instruct negD_reg_reg(fRegD dst, fRegD src) %{
  match(Set dst (NegD src));

  ins_cost(XFER_COST);
  format %{ "fsgnjn.d $dst, $src, $src\t#@negD_reg_reg" %}

  ins_encode %{
    __ fneg_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
7780
// Branchless integer abs: t0 = src >> (width-1) (all ones if negative,
// zero otherwise); dst = (src + t0) ^ t0. Uses scratch register t0.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (AbsI src));

  ins_cost(ALU_COST * 3);
  format %{
    "sraiw t0, $src, 0x1f\n\t"
    "addw $dst, $src, t0\n\t"
    "xorr $dst, $dst, t0\t#@absI_reg"
  %}

  ins_encode %{
    __ sraiw(t0, as_Register($src$$reg), 0x1f);
    __ addw(as_Register($dst$$reg), as_Register($src$$reg), t0);
    __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}

// Same branchless sequence for 64-bit values (shift by 63).
instruct absL_reg(iRegLNoSp dst, iRegL src) %{
  match(Set dst (AbsL src));

  ins_cost(ALU_COST * 3);
  format %{
    "srai t0, $src, 0x3f\n\t"
    "add $dst, $src, t0\n\t"
    "xorr $dst, $dst, t0\t#@absL_reg"
  %}

  ins_encode %{
    __ srai(t0, as_Register($src$$reg), 0x3f);
    __ add(as_Register($dst$$reg), as_Register($src$$reg), t0);
    __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}
7818
// FP abs; fabs_s/fabs_d expand to fsgnjx with src twice (clears sign bit).
instruct absF_reg(fRegF dst, fRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(XFER_COST);
  format %{ "fsgnjx.s $dst, $src, $src\t#@absF_reg" %}
  ins_encode %{
    __ fabs_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

instruct absD_reg(fRegD dst, fRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(XFER_COST);
  format %{ "fsgnjx.d $dst, $src, $src\t#@absD_reg" %}
  ins_encode %{
    __ fabs_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// FP square root (fsqrt.s / fsqrt.d).
instruct sqrtF_reg(fRegF dst, fRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(FSQRT_COST);
  format %{ "fsqrt.s $dst, $src\t#@sqrtF_reg" %}
  ins_encode %{
    __ fsqrt_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_sqrt_s);
%}

instruct sqrtD_reg(fRegD dst, fRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(FSQRT_COST);
  format %{ "fsqrt.d $dst, $src\t#@sqrtD_reg" %}
  ins_encode %{
    __ fsqrt_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_sqrt_d);
%}
7870
// Round Instruction
// RoundDoubleMode: round src according to the compile-time constant rmode
// (the RoundDoubleMode node's mode constant). The helper needs three GP
// scratch registers, and dst must not overlap src (TEMP_DEF dst).
instruct roundD_reg(fRegD dst, fRegD src, immI rmode, iRegLNoSp tmp1, iRegLNoSp tmp2, iRegLNoSp tmp3) %{
  match(Set dst (RoundDoubleMode src rmode));
  ins_cost(2 * XFER_COST + BRANCH_COST);
  effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "RoundDoubleMode $src, $rmode" %}
  ins_encode %{
    __ round_double_mode(as_FloatRegister($dst$$reg),
                         as_FloatRegister($src$$reg), $rmode$$constant, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
7884
// Copysign and signum intrinsics

// CopySignD maps directly onto fsgnj.d. The immD zero operand only
// constrains matching (shape of the CopySignD node); it is not used in
// the encoding.
instruct copySignD_reg(fRegD dst, fRegD src1, fRegD src2, immD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  format %{ "CopySignD $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ fsgnj_d(dst, src1, src2);
  %}
  ins_pipe(fp_dop_reg_reg_d);
%}

instruct copySignF_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  format %{ "CopySignF $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ fsgnj_s(dst, src1, src2);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Signum: dst is read-modify-write (matched as both input and output).
// The zero/one operands constrain matching; only $one is passed to the
// helper, which presumably uses it to materialize +/-1.0 — see
// MacroAssembler::signum_fp for details.
instruct signumD_reg(fRegD dst, immD zero, fRegD one) %{
  match(Set dst (SignumD dst (Binary zero one)));
  format %{ "signumD $dst, $dst" %}
  ins_encode %{
    __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), true /* is_double */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct signumF_reg(fRegF dst, immF zero, fRegF one) %{
  match(Set dst (SignumF dst (Binary zero one)));
  format %{ "signumF $dst, $dst" %}
  ins_encode %{
    __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), false /* is_double */);
  %}
  ins_pipe(pipe_class_default);
%}
7928
7929 // Arithmetic Instructions End
7930
7931 // ============================================================================
7932 // Logical Instructions
7933
// Integer/long bitwise AND/OR/XOR, register-register and register-immediate
// forms. The imm* forms are limited to immIAdd/immLAdd operands, i.e.
// constants that fit the 12-bit I-type immediate of andi/ori/xori.

// Register And
instruct andI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "andr $dst, $src1, $src2\t#@andI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate And
instruct andI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "andi $dst, $src1, $src2\t#@andI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Or
instruct orI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orr $dst, $src1, $src2\t#@orI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Or
instruct orI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "ori $dst, $src1, $src2\t#@orI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ ori(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Xor
instruct xorI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "xorr $dst, $src1, $src2\t#@xorI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xorr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Xor
instruct xorI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "xori $dst, $src1, $src2\t#@xorI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xori(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register And Long
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (AndL src1 src2));

  format %{ "andr $dst, $src1, $src2\t#@andL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate And Long
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (AndL src1 src2));

  format %{ "andi $dst, $src1, $src2\t#@andL_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Or Long
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr $dst, $src1, $src2\t#@orL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Or Long
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "ori $dst, $src1, $src2\t#@orL_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ ori(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Xor Long
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "xorr $dst, $src1, $src2\t#@xorL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xorr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
8109
// Immediate Xor Long
// dst = src1 ^ src2 where src2 fits the 12-bit I-type immediate (immLAdd).
// Declaration order (format before ins_cost) normalized to match every
// sibling imm-form instruct in this section.
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "xori $dst, $src1, $src2\t#@xorL_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xori(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
8125
8126 // ============================================================================
8127 // MemBar Instruction
8128
8129 // RVTSO
8130
// Under Ztso (total store ordering) the hardware already orders everything
// except store->load, so all of these barriers can be elided to nothing.
instruct unnecessary_membar_rvtso() %{
  predicate(UseZtso);
  match(LoadFence);
  match(StoreFence);
  match(StoreStoreFence);
  match(MemBarAcquire);
  match(MemBarRelease);
  match(MemBarStoreStore);
  match(MemBarAcquireLock);
  match(MemBarReleaseLock);

  ins_cost(0);

  size(0);

  format %{ "#@unnecessary_membar_rvtso elided/tso (empty encoding)" %}
  ins_encode %{
    __ block_comment("unnecessary_membar_rvtso");
  %}
  ins_pipe(real_empty);
%}

// MemBarVolatile needs a StoreLoad barrier even under Ztso.
instruct membar_volatile_rvtso() %{
  predicate(UseZtso);
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_volatile_rvtso\n\t"
            "fence w, r"%}

  ins_encode %{
    __ block_comment("membar_volatile_rvtso");
    __ membar(MacroAssembler::StoreLoad);
  %}

  ins_pipe(pipe_slow);
%}

// Elide MemBarVolatile when a following instruction already provides the
// StoreLoad ordering (Matcher::post_store_load_barrier).
instruct unnecessary_membar_volatile_rvtso() %{
  predicate(UseZtso && Matcher::post_store_load_barrier(n));
  match(MemBarVolatile);
  ins_cost(0);

  size(0);

  format %{ "#@unnecessary_membar_volatile_rvtso (unnecessary so empty encoding)" %}
  ins_encode %{
    __ block_comment("unnecessary_membar_volatile_rvtso");
  %}
  ins_pipe(real_empty);
%}
8182
8183 // RVWMO
8184
// Acquire barrier under RVWMO: earlier loads may not reorder with later
// loads or stores ("fence r, rw", i.e. LoadLoad | LoadStore).
// Fix: "aqcuire" was a misspelling of "acquire" in the instruct name,
// format string and block comment; the matcher keys on the match rules,
// not the name, so the rename is safe.
instruct membar_acquire_rvwmo() %{
  predicate(!UseZtso);
  match(LoadFence);
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_acquire_rvwmo\n\t"
            "fence r, rw" %}

  ins_encode %{
    __ block_comment("membar_acquire_rvwmo");
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
8200
// Release barrier under RVWMO: earlier loads and stores may not reorder
// with later stores ("fence rw, w", i.e. LoadStore | StoreStore).
instruct membar_release_rvwmo() %{
  predicate(!UseZtso);
  match(StoreFence);
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_release_rvwmo\n\t"
            "fence rw, w" %}

  ins_encode %{
    __ block_comment("membar_release_rvwmo");
    __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8216
// StoreStore barrier under RVWMO: earlier stores may not reorder with
// later stores ("fence w, w").
// Fix: emit the debug block_comment like every sibling membar instruct
// in this file (block comments produce no machine code).
instruct membar_storestore_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_storestore_rvwmo\n\t"
            "fence w, w" %}

  ins_encode %{
    __ block_comment("membar_storestore_rvwmo");
    __ membar(MacroAssembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
8231
// Full volatile barrier under RVWMO: StoreLoad ordering ("fence w, r").
instruct membar_volatile_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_volatile_rvwmo\n\t"
            "fence w, r"%}

  ins_encode %{
    __ block_comment("membar_volatile_rvwmo");
    __ membar(MacroAssembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}

// Lock acquire/release barriers are elided: the atomic instructions used
// for locking provide the required ordering themselves.
instruct membar_lock_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarAcquireLock);
  match(MemBarReleaseLock);
  ins_cost(0);

  format %{ "#@membar_lock_rvwmo (elided)" %}

  ins_encode %{
    __ block_comment("membar_lock_rvwmo (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Elide MemBarVolatile when a following instruction already provides the
// StoreLoad ordering (Matcher::post_store_load_barrier).
instruct unnecessary_membar_volatile_rvwmo() %{
  predicate(!UseZtso && Matcher::post_store_load_barrier(n));
  match(MemBarVolatile);
  ins_cost(0);

  size(0);
  format %{ "#@unnecessary_membar_volatile_rvwmo (unnecessary so empty encoding)" %}
  ins_encode %{
    __ block_comment("unnecessary_membar_volatile_rvwmo");
  %}
  ins_pipe(real_empty);
%}

// Thread.onSpinWait intrinsic: emit the Zihintpause "pause" hint.
instruct spin_wait() %{
  predicate(UseZihintpause);
  match(OnSpinWait);
  ins_cost(CACHE_MISS_COST);

  format %{ "spin_wait" %}

  ins_encode %{
    __ pause();
  %}

  ins_pipe(pipe_serial);
%}
8289
8290 // ============================================================================
8291 // Cast Instructions (Java-level type cast)
8292
// long -> ptr reinterpret: just a register move, elided when dst == src.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# long -> ptr, #@castX2P" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mv(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}

// ptr -> long reinterpret: just a register move, elided when dst == src.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# ptr -> long, #@castP2X" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mv(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
8322
// Compiler-only type-narrowing casts. These nodes exist for the optimizer's
// type system; at the machine level the value is unchanged, so each of
// them is size(0)/cost(0) with an empty encoding.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));
  ins_cost(0);

  size(0);
  format %{ "# castPP of $dst, #@castPP" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst, #@castLL" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst, #@castII" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  ins_cost(0);
  format %{ "# checkcastPP of $dst, #@checkCastPP" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}

instruct castHH(fRegF dst)
%{
  match(Set dst (CastHH dst));

  size(0);
  format %{ "# castHH of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castFF(fRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castDD(fRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}

// CastVV when the vector value lives in a mask register.
instruct castVVMask(vRegMask dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
8421
8422 // ============================================================================
8423 // Convert Instructions
8424
// Conv2B: dst = (src != 0) ? 1 : 0, via snez.

// int to bool
instruct convI2Bool(iRegINoSp dst, iRegI src)
%{
  match(Set dst (Conv2B src));

  ins_cost(ALU_COST);
  format %{ "snez $dst, $src\t#@convI2Bool" %}

  ins_encode %{
    __ snez(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// pointer to bool
instruct convP2Bool(iRegINoSp dst, iRegP src)
%{
  match(Set dst (Conv2B src));

  ins_cost(ALU_COST);
  format %{ "snez $dst, $src\t#@convP2Bool" %}

  ins_encode %{
    __ snez(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
8454
// int <-> long

// Sign-extend int to long.
// NOTE(review): the format string shows "addw $dst, $src, zr"; the encoding
// calls MacroAssembler::sext(..., 32), which emits an equivalent
// sign-extend-word form — confirm the mnemonic if the format matters.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(ALU_COST);
  format %{ "addw $dst, $src, zr\t#@convI2L_reg_reg" %}
  ins_encode %{
    __ sext(as_Register($dst$$reg), as_Register($src$$reg), 32);
  %}
  ins_pipe(ialu_reg);
%}

// Truncate long to int (result is the sign-extended low 32 bits, the
// canonical int representation in a 64-bit register).
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(ALU_COST);
  format %{ "addw $dst, $src, zr\t#@convL2I_reg" %}

  ins_encode %{
    __ sext(as_Register($dst$$reg), as_Register($src$$reg), 32);
  %}

  ins_pipe(ialu_reg);
%}

// int to unsigned long (Zero-extend)
// Matches (ConvI2L src) & 0xFFFFFFFF and collapses it to one zext.
instruct convI2UL_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(ALU_COST * 2);
  format %{ "zext $dst, $src, 32\t# i2ul, #@convI2UL_reg_reg" %}

  ins_encode %{
    __ zext(as_Register($dst$$reg), as_Register($src$$reg), 32);
  %}

  ins_pipe(ialu_reg_shift);
%}
8496
// float <-> double

// Widen float to double (fcvt.d.s); always exact.
instruct convF2D_reg(fRegD dst, fRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.s $dst, $src\t#@convF2D_reg" %}

  ins_encode %{
    __ fcvt_d_s(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Narrow double to float (fcvt.s.d); rounds per current rounding mode.
instruct convD2F_reg(fRegF dst, fRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.d $dst, $src\t#@convD2F_reg" %}

  ins_encode %{
    __ fcvt_s_d(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}

// single <-> half precision

// Half-float bits (in a GP register) -> float; helper needs a GP scratch.
instruct convHF2F_reg_reg(fRegF dst, iRegIorL2I src, iRegINoSp tmp) %{
  match(Set dst (ConvHF2F src));
  effect(TEMP tmp);
  format %{ "fmv.h.x $dst, $src\t# move source from $src to $dst\n\t"
            "fcvt.s.h $dst, $dst\t# convert half to single precision"
  %}
  ins_encode %{
    __ float16_to_float($dst$$FloatRegister, $src$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// float -> half-float bits (in a GP register); needs FP and GP scratch,
// and dst must not overlap the inputs (TEMP_DEF dst).
instruct convF2HF_reg_reg(iRegINoSp dst, fRegF src, fRegF ftmp, iRegINoSp xtmp) %{
  match(Set dst (ConvF2HF src));
  effect(TEMP_DEF dst, TEMP ftmp, TEMP xtmp);
  format %{ "fcvt.h.s $ftmp, $src\t# convert single precision to half\n\t"
            "fmv.x.h $dst, $ftmp\t# move result from $ftmp to $dst"
  %}
  ins_encode %{
    __ float_to_float16($dst$$Register, $src$$FloatRegister, $ftmp$$FloatRegister, $xtmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8550
// half precision operations

// Pure bit move: short bits (GPR) reinterpreted as half-precision (FPR).
instruct reinterpretS2HF(fRegF dst, iRegI src)
%{
  match(Set dst (ReinterpretS2HF src));
  format %{ "fmv.h.x $dst, $src\t# reinterpretS2HF" %}
  ins_encode %{
    __ fmv_h_x($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(fp_i2f);
%}

// Fusion of ConvF2HF followed by ReinterpretS2HF: the value never leaves
// the FP register file, so a single fcvt.h.s suffices.
instruct convF2HFAndS2HF(fRegF dst, fRegF src)
%{
  match(Set dst (ReinterpretS2HF (ConvF2HF src)));
  format %{ "convF2HFAndS2HF $dst, $src" %}
  ins_encode %{
    __ fcvt_h_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}

// Pure bit move: half-precision bits (FPR) out to a GPR as a short.
instruct reinterpretHF2S(iRegINoSp dst, fRegF src)
%{
  match(Set dst (ReinterpretHF2S src));
  format %{ "fmv.x.h $dst, $src\t# reinterpretHF2S" %}
  ins_encode %{
    __ fmv_x_h($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(fp_f2i);
%}

// Fusion of ReinterpretHF2S followed by ConvHF2F: stays in the FP register
// file, emitted as a single fcvt.s.h.
instruct convHF2SAndHF2F(fRegF dst, fRegF src)
%{
  match(Set dst (ConvHF2F (ReinterpretHF2S src)));
  format %{ "convHF2SAndHF2F $dst, $src" %}
  ins_encode %{
    __ fcvt_s_h($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}

// Half-precision square root.
instruct sqrt_HF_reg(fRegF dst, fRegF src)
%{
  match(Set dst (SqrtHF src));
  format %{ "fsqrt.h $dst, $src" %}
  ins_encode %{
    __ fsqrt_h($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_sqrt_s);
%}

// Half-precision add/sub/mul/div share one rule; the emitted instruction
// is selected from the matched ideal opcode at encode time.
instruct binOps_HF_reg(fRegF dst, fRegF src1, fRegF src2)
%{
  match(Set dst (AddHF src1 src2));
  match(Set dst (SubHF src1 src2));
  match(Set dst (MulHF src1 src2));
  match(Set dst (DivHF src1 src2));
  format %{ "binop_hf $dst, $src1, $src2" %}
  ins_encode %{
    int opcode = this->ideal_Opcode();
    switch(opcode) {
      case Op_AddHF: __ fadd_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_SubHF: __ fsub_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_MulHF: __ fmul_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_DivHF: __ fdiv_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      default: assert(false, "%s is not supported here", NodeClassNames[opcode]); break;
    }
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
8622
// Half-precision min without Zfa: expanded via the minmax_fp macro,
// which clobbers the flags register (KILL cr).
instruct min_HF_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr)
%{
  predicate(!UseZfa);
  match(Set dst (MinHF src1 src2));
  effect(KILL cr);

  format %{ "min_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
                 __ FLOAT_TYPE::half_precision, true /* is_min */);
  %}
  ins_pipe(pipe_class_default);
%}

// Half-precision min with the Zfa extension: single fminm.h instruction.
instruct min_HF_reg_zfa(fRegF dst, fRegF src1, fRegF src2)
%{
  predicate(UseZfa);
  match(Set dst (MinHF src1 src2));

  format %{ "min_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_h(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Half-precision max without Zfa: expanded via the minmax_fp macro,
// which clobbers the flags register (KILL cr).
instruct max_HF_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr)
%{
  predicate(!UseZfa);
  match(Set dst (MaxHF src1 src2));
  effect(KILL cr);

  format %{ "max_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
                 __ FLOAT_TYPE::half_precision, false /* is_min */);
  %}
  ins_pipe(pipe_class_default);
%}

// Half-precision max with the Zfa extension: single fmaxm.h instruction.
instruct max_HF_reg_zfa(fRegF dst, fRegF src1, fRegF src2)
%{
  predicate(UseZfa);
  match(Set dst (MaxHF src1 src2));

  format %{ "max_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ fmaxm_h(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
8682
// Half-precision fused multiply-add: $dst = $src1 * $src2 + $src3.
// This is a scalar Zfh operation; the previous format comment said
// "fma packedH", which wrongly suggested a packed/vector operation.
instruct fma_HF_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3)
%{
  match(Set dst (FmaHF src3 (Binary src1 src2)));
  format %{ "fmadd.h $dst, $src1, $src2, $src3\t# $dst = $src1 * $src2 + $src3 fma halfF" %}
  ins_encode %{
    __ fmadd_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
8692
// float <-> int

// Float to int. NOTE(review): the _safe macro variant is assumed to add the
// fixups required for Java semantics on NaN/out-of-range inputs — see the
// MacroAssembler implementation to confirm.
instruct convF2I_reg_reg(iRegINoSp dst, fRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(XFER_COST);
  format %{ "fcvt.w.s $dst, $src\t#@convF2I_reg_reg" %}

  ins_encode %{
    __ fcvt_w_s_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_f2i);
%}

// Int to float.
instruct convI2F_reg_reg(fRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.w $dst, $src\t#@convI2F_reg_reg" %}

  ins_encode %{
    __ fcvt_s_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}

// float <-> long

// Float to long, via the _safe macro variant (see note on convF2I_reg_reg).
instruct convF2L_reg_reg(iRegLNoSp dst, fRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(XFER_COST);
  format %{ "fcvt.l.s $dst, $src\t#@convF2L_reg_reg" %}

  ins_encode %{
    __ fcvt_l_s_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_f2l);
%}

// Long to float.
instruct convL2F_reg_reg(fRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.l $dst, $src\t#@convL2F_reg_reg" %}

  ins_encode %{
    __ fcvt_s_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
8748
// double <-> int

// Double to int, via the _safe macro variant (assumed to pin Java
// semantics for NaN/out-of-range inputs — see MacroAssembler).
instruct convD2I_reg_reg(iRegINoSp dst, fRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(XFER_COST);
  format %{ "fcvt.w.d $dst, $src\t#@convD2I_reg_reg" %}

  ins_encode %{
    __ fcvt_w_d_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_d2i);
%}

// Int to double.
instruct convI2D_reg_reg(fRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.w $dst, $src\t#@convI2D_reg_reg" %}

  ins_encode %{
    __ fcvt_d_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}

// double <-> long

// Double to long, via the _safe macro variant (see note on convD2I_reg_reg).
instruct convD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(XFER_COST);
  format %{ "fcvt.l.d $dst, $src\t#@convD2L_reg_reg" %}

  ins_encode %{
    __ fcvt_l_d_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_d2l);
%}

// Long to double.
instruct convL2D_reg_reg(fRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.l $dst, $src\t#@convL2D_reg_reg" %}

  ins_encode %{
    __ fcvt_d_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
8804
// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(ALU_COST * 2);
  format %{ "zext $dst, $src, 32\t# ptr -> int, #@convP2I" %}

  // Truncate the raw pointer bits to their low 32 bits via zero-extend.
  ins_encode %{
    __ zext($dst$$Register, $src$$Register, 32);
  %}

  ins_pipe(ialu_reg);
%}

// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  // Only valid when compressed oops are unshifted: the narrow-oop bits
  // are then the address bits, so a plain register move suffices.
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# compressed ptr -> int, #@convN2I" %}

  ins_encode %{
    __ mv($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8835
// RoundD: double -> long rounding. NOTE(review): java_round_double is
// assumed to implement Math.round(double) semantics — confirm in the
// MacroAssembler. ftmp is FP scratch for the expansion.
instruct round_double_reg(iRegLNoSp dst, fRegD src, fRegD ftmp) %{
  match(Set dst (RoundD src));

  ins_cost(XFER_COST + BRANCH_COST);
  effect(TEMP ftmp);
  format %{ "java_round_double $dst, $src\t#@round_double_reg" %}

  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
  %}

  ins_pipe(pipe_slow);
%}

// RoundF: float -> int rounding (see note on round_double_reg).
instruct round_float_reg(iRegINoSp dst, fRegF src, fRegF ftmp) %{
  match(Set dst (RoundF src));

  ins_cost(XFER_COST + BRANCH_COST);
  effect(TEMP ftmp);
  format %{ "java_round_float $dst, $src\t#@round_float_reg" %}

  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
  %}

  ins_pipe(pipe_slow);
%}
8863
// Convert oop pointer into compressed form
// Used when the oop may be null (predicate excludes the NotNull case,
// which is handled by the cheaper rule below).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(ALU_COST);
  format %{ "encode_heap_oop $dst, $src\t#@encodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(pipe_class_default);
%}

// Variant for oops statically known to be non-null.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(ALU_COST);
  format %{ "encode_heap_oop_not_null $dst, $src\t#@encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Decompress a narrow oop that may be null.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));

  ins_cost(0);
  format %{ "decode_heap_oop $dst, $src\t#@decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(pipe_class_default);
%}

// Decompress a narrow oop known to be non-null (or a constant).
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));

  ins_cost(0);
  format %{ "decode_heap_oop_not_null $dst, $src\t#@decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(pipe_class_default);
%}
8918
// Convert klass pointer into compressed form.
// Uses t0 as scratch inside the macro expansion.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(ALU_COST);
  format %{ "encode_klass_not_null $dst, $src\t#@encodeKlass_not_null" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg, t0);
  %}

  ins_pipe(pipe_class_default);
%}

// Decompress a narrow klass pointer. dst must not alias src (TEMP_DEF)
// and an extra temp register is required by the macro expansion.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src, iRegPNoSp tmp) %{
  match(Set dst (DecodeNKlass src));

  effect(TEMP_DEF dst, TEMP tmp);

  ins_cost(ALU_COST);
  format %{ "decode_klass_not_null $dst, $src\t#@decodeKlass_not_null" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    Register tmp_reg = as_Register($tmp$$reg);
    __ decode_klass_not_null(dst_reg, src_reg, tmp_reg);
  %}

  ins_pipe(pipe_class_default);
%}
8952
// stack <-> reg and reg <-> reg shuffles with no conversion
// These move raw bit patterns between the int/FP register files and stack
// slots; no value conversion is performed.

// Load float stack slot bits into an int register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "lw $dst, $src\t#@MoveF2I_stack_reg" %}

  ins_encode %{
    __ lw(as_Register($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load int stack slot bits into a float register.
instruct MoveI2F_stack_reg(fRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "flw $dst, $src\t#@MoveI2F_stack_reg" %}

  ins_encode %{
    __ flw(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(fp_load_mem_s);

%}

// Load double stack slot bits into a long register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "ld $dst, $src\t#@MoveD2L_stack_reg" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load long stack slot bits into a double register.
instruct MoveL2D_stack_reg(fRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "fld $dst, $src\t#@MoveL2D_stack_reg" %}

  ins_encode %{
    __ fld(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(fp_load_mem_d);

%}

// Store float register bits to an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, fRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "fsw $src, $dst\t#@MoveF2I_reg_stack" %}

  ins_encode %{
    __ fsw(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(fp_store_reg_s);

%}

// Store int register bits to a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "sw $src, $dst\t#@MoveI2F_reg_stack" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
9062
// Store double register bits to a long stack slot.
// Fixed the format string: the encoding stores $src into the $dst slot,
// and all sibling *_reg_stack store rules print the source operand first
// ("fsw $src, $dst", "sw $src, $dst", "sd $src, $dst"); this one printed
// "fsd $dst, $src" with the operands reversed.
instruct MoveD2L_reg_stack(stackSlotL dst, fRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "fsd $src, $dst\t#@MoveD2L_reg_stack" %}

  ins_encode %{
    __ fsd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(fp_store_reg_d);

%}
9080
// Store long register bits to a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "sd $src, $dst\t#@MoveL2D_reg_stack" %}

  ins_encode %{
    __ sd(as_Register($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Register-to-register bit moves between the FP and integer files.

// Float bits -> int register.
instruct MoveF2I_reg_reg(iRegINoSp dst, fRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.x.w $dst, $src\t#@MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmv_x_w(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// Int bits -> float register.
instruct MoveI2F_reg_reg(fRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.w.x $dst, $src\t#@MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmv_w_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);

%}

// Double bits -> long register.
instruct MoveD2L_reg_reg(iRegLNoSp dst, fRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.x.d $dst, $src\t#@MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmv_x_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// Long bits -> double register.
instruct MoveL2D_reg_reg(fRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.d.x $dst, $src\t#@MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmv_d_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);

%}
9170
9171 // ============================================================================
9172 // Compare Instructions which set the result float comparisons in dest register.
9173
9174 instruct cmpF3_reg_reg(iRegINoSp dst, fRegF op1, fRegF op2)
9175 %{
9176 match(Set dst (CmpF3 op1 op2));
9177
9178 ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
9179 format %{ "flt.s $dst, $op2, $op1\t#@cmpF3_reg_reg\n\t"
9180 "bgtz $dst, done\n\t"
9181 "feq.s $dst, $op1, $op2\n\t"
9182 "addi $dst, $dst, -1\n\t"
9183 "done:"
9184 %}
9185
9186 ins_encode %{
9187 // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
9188 __ float_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg),
9189 as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
9190 %}
9191
9192 ins_pipe(pipe_class_default);
9193 %}
9194
9195 instruct cmpD3_reg_reg(iRegINoSp dst, fRegD op1, fRegD op2)
9196 %{
9197 match(Set dst (CmpD3 op1 op2));
9198
9199 ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
9200 format %{ "flt.d $dst, $op2, $op1\t#@cmpD3_reg_reg\n\t"
9201 "bgtz $dst, done\n\t"
9202 "feq.d $dst, $op1, $op2\n\t"
9203 "addi $dst, $dst, -1\n\t"
9204 "done:"
9205 %}
9206
9207 ins_encode %{
9208 // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
9209 __ double_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
9210 %}
9211
9212 ins_pipe(pipe_class_default);
9213 %}
9214
9215 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
9216 %{
9217 match(Set dst (CmpL3 op1 op2));
9218
9219 ins_cost(ALU_COST * 3 + BRANCH_COST);
9220 format %{ "slt $dst, $op2, $op1\t#@cmpL3_reg_reg\n\t"
9221 "bnez $dst, done\n\t"
9222 "slt $dst, $op1, $op2\n\t"
9223 "neg $dst, $dst\n\t"
9224 "done:"
9225 %}
9226 ins_encode %{
9227 __ cmp_l2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
9228 __ mv(as_Register($dst$$reg), t0);
9229 %}
9230
9231 ins_pipe(pipe_class_default);
9232 %}
9233
9234 instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
9235 %{
9236 match(Set dst (CmpUL3 op1 op2));
9237
9238 ins_cost(ALU_COST * 3 + BRANCH_COST);
9239 format %{ "sltu $dst, $op2, $op1\t#@cmpUL3_reg_reg\n\t"
9240 "bnez $dst, done\n\t"
9241 "sltu $dst, $op1, $op2\n\t"
9242 "neg $dst, $dst\n\t"
9243 "done:"
9244 %}
9245 ins_encode %{
9246 __ cmp_ul2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
9247 __ mv(as_Register($dst$$reg), t0);
9248 %}
9249
9250 ins_pipe(pipe_class_default);
9251 %}
9252
9253 instruct cmpU3_reg_reg(iRegINoSp dst, iRegI op1, iRegI op2)
9254 %{
9255 match(Set dst (CmpU3 op1 op2));
9256
9257 ins_cost(ALU_COST * 3 + BRANCH_COST);
9258 format %{ "sltu $dst, $op2, $op1\t#@cmpU3_reg_reg\n\t"
9259 "bnez $dst, done\n\t"
9260 "sltu $dst, $op1, $op2\n\t"
9261 "neg $dst, $dst\n\t"
9262 "done:"
9263 %}
9264 ins_encode %{
9265 __ cmp_uw2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
9266 __ mv(as_Register($dst$$reg), t0);
9267 %}
9268
9269 ins_pipe(pipe_class_default);
9270 %}
9271
// CmpLTMask: dst = -1 (all ones) if p < q, else 0.
// slt produces 0/1; negating with subw spreads it to 0/-1.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q)
%{
  match(Set dst (CmpLTMask p q));

  ins_cost(2 * ALU_COST);

  format %{ "slt $dst, $p, $q\t#@cmpLTMask_reg_reg\n\t"
            "subw $dst, zr, $dst\t#@cmpLTMask_reg_reg"
  %}

  ins_encode %{
    __ slt(as_Register($dst$$reg), as_Register($p$$reg), as_Register($q$$reg));
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9289
// CmpLTMask against zero: dst = -1 if $op < 0, else 0, by arithmetically
// shifting the sign bit across the word.
// Fixed the format string: it printed "$dst, $dst" although the encoding
// shifts $op, and it was tagged with the wrong rule name
// (#@cmpLTMask_reg_reg instead of this rule's own name).
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I op, immI0 zero)
%{
  match(Set dst (CmpLTMask op zero));

  ins_cost(ALU_COST);

  format %{ "sraiw $dst, $op, 31\t#@cmpLTMask_reg_zero" %}

  ins_encode %{
    __ sraiw(as_Register($dst$$reg), as_Register($op$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
9304
9305
9306 // ============================================================================
9307 // Max and Min
9308
9309 instruct minI_reg_reg(iRegINoSp dst, iRegI src)
9310 %{
9311 match(Set dst (MinI dst src));
9312
9313 ins_cost(BRANCH_COST + ALU_COST);
9314 format %{"minI_reg_reg $dst, $dst, $src\t#@minI_reg_reg\n\t"%}
9315
9316 ins_encode %{
9317 __ cmov_gt(as_Register($dst$$reg), as_Register($src$$reg),
9318 as_Register($dst$$reg), as_Register($src$$reg));
9319 %}
9320
9321 ins_pipe(pipe_class_compare);
9322 %}
9323
9324 instruct maxI_reg_reg(iRegINoSp dst, iRegI src)
9325 %{
9326 match(Set dst (MaxI dst src));
9327
9328 ins_cost(BRANCH_COST + ALU_COST);
9329 format %{"maxI_reg_reg $dst, $dst, $src\t#@maxI_reg_reg\n\t"%}
9330
9331 ins_encode %{
9332 __ cmov_lt(as_Register($dst$$reg), as_Register($src$$reg),
9333 as_Register($dst$$reg), as_Register($src$$reg));
9334 %}
9335
9336 ins_pipe(pipe_class_compare);
9337 %}
9338
9339 // special case for comparing with zero
9340 // n.b. this is selected in preference to the rule above because it
9341 // avoids loading constant 0 into a source register
9342
9343 instruct minI_reg_zero(iRegINoSp dst, immI0 zero)
9344 %{
9345 match(Set dst (MinI dst zero));
9346 match(Set dst (MinI zero dst));
9347
9348 ins_cost(BRANCH_COST + ALU_COST);
9349 format %{"minI_reg_zero $dst, $dst, zr\t#@minI_reg_zero\n\t"%}
9350
9351 ins_encode %{
9352 __ cmov_gt(as_Register($dst$$reg), zr,
9353 as_Register($dst$$reg), zr);
9354 %}
9355
9356 ins_pipe(pipe_class_compare);
9357 %}
9358
9359 instruct maxI_reg_zero(iRegINoSp dst, immI0 zero)
9360 %{
9361 match(Set dst (MaxI dst zero));
9362 match(Set dst (MaxI zero dst));
9363
9364 ins_cost(BRANCH_COST + ALU_COST);
9365 format %{"maxI_reg_zero $dst, $dst, zr\t#@maxI_reg_zero\n\t"%}
9366
9367 ins_encode %{
9368 __ cmov_lt(as_Register($dst$$reg), zr,
9369 as_Register($dst$$reg), zr);
9370 %}
9371
9372 ins_pipe(pipe_class_compare);
9373 %}
9374
9375 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
9376 %{
9377 match(Set dst (MinI src1 src2));
9378
9379 effect(DEF dst, USE src1, USE src2);
9380
9381 ins_cost(BRANCH_COST + ALU_COST * 2);
9382 format %{"minI_rReg $dst, $src1, $src2\t#@minI_rReg\n\t"%}
9383
9384 ins_encode %{
9385 __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
9386 __ cmov_gt(as_Register($src1$$reg), as_Register($src2$$reg),
9387 as_Register($dst$$reg), as_Register($src2$$reg));
9388 %}
9389
9390 ins_pipe(pipe_class_compare);
9391 %}
9392
9393 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
9394 %{
9395 match(Set dst (MaxI src1 src2));
9396
9397 effect(DEF dst, USE src1, USE src2);
9398
9399 ins_cost(BRANCH_COST + ALU_COST * 2);
9400 format %{"maxI_rReg $dst, $src1, $src2\t#@maxI_rReg\n\t"%}
9401
9402 ins_encode %{
9403 __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
9404 __ cmov_lt(as_Register($src1$$reg), as_Register($src2$$reg),
9405 as_Register($dst$$reg), as_Register($src2$$reg));
9406 %}
9407
9408 ins_pipe(pipe_class_compare);
9409 %}
9410
9411 // ============================================================================
9412 // Branch Instructions
9413 // Direct Branch.
9414 instruct branch(label lbl)
9415 %{
9416 match(Goto);
9417
9418 effect(USE lbl);
9419
9420 ins_cost(BRANCH_COST);
9421 format %{ "j $lbl\t#@branch" %}
9422
9423 ins_encode(riscv_enc_j(lbl));
9424
9425 ins_pipe(pipe_branch);
9426 %}
9427
9428 // ============================================================================
9429 // Compare and Branch Instructions
9430
9431 // Patterns for short (< 12KiB) variants
9432
9433 // Compare flags and branch near instructions.
9434 instruct cmpFlag_branch(cmpOpEqNe cmp, rFlagsReg cr, label lbl) %{
9435 match(If cmp cr);
9436 effect(USE lbl);
9437
9438 ins_cost(BRANCH_COST);
9439 format %{ "b$cmp $cr, zr, $lbl\t#@cmpFlag_branch" %}
9440
9441 ins_encode %{
9442 __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label));
9443 %}
9444 ins_pipe(pipe_cmpz_branch);
9445 ins_short_branch(1);
9446 %}
9447
9448 // Compare signed int and branch near instructions
9449 instruct cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
9450 %{
9451 // Same match rule as `far_cmpI_branch'.
9452 match(If cmp (CmpI op1 op2));
9453
9454 effect(USE lbl);
9455
9456 ins_cost(BRANCH_COST);
9457
9458 format %{ "b$cmp $op1, $op2, $lbl\t#@cmpI_branch" %}
9459
9460 ins_encode %{
9461 __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
9462 %}
9463
9464 ins_pipe(pipe_cmp_branch);
9465 ins_short_branch(1);
9466 %}
9467
9468 instruct cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
9469 %{
9470 // Same match rule as `far_cmpI_loop'.
9471 match(CountedLoopEnd cmp (CmpI op1 op2));
9472
9473 effect(USE lbl);
9474
9475 ins_cost(BRANCH_COST);
9476
9477 format %{ "b$cmp $op1, $op2, $lbl\t#@cmpI_loop" %}
9478
9479 ins_encode %{
9480 __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
9481 %}
9482
9483 ins_pipe(pipe_cmp_branch);
9484 ins_short_branch(1);
9485 %}
9486
9487 // Compare unsigned int and branch near instructions
9488 instruct cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl)
9489 %{
9490 // Same match rule as `far_cmpU_branch'.
9491 match(If cmp (CmpU op1 op2));
9492
9493 effect(USE lbl);
9494
9495 ins_cost(BRANCH_COST);
9496
9497 format %{ "b$cmp $op1, $op2, $lbl\t#@cmpU_branch" %}
9498
9499 ins_encode %{
9500 __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
9501 as_Register($op2$$reg), *($lbl$$label));
9502 %}
9503
9504 ins_pipe(pipe_cmp_branch);
9505 ins_short_branch(1);
9506 %}
9507
// Compare signed long and branch near instructions
instruct cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpL_branch'.
  match(If cmp (CmpL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Loop-back-edge variant of cmpL_branch (CountedLoopEnd).
instruct cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpL_loop'.
  match(CountedLoopEnd cmp (CmpL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpL_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Compare unsigned long and branch near instructions
instruct cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpUL_branch'.
  match(If cmp (CmpUL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpUL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
9566
// Compare pointer and branch near instructions
// Pointers compare as unsigned values.
instruct cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
%{
  // Same match rule as `far_cmpP_branch'.
  match(If cmp (CmpP op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpP_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Compare narrow pointer and branch near instructions
instruct cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
%{
  // Same match rule as `far_cmpN_branch'.
  match(If cmp (CmpN op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpN_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Compare float and branch near instructions
instruct cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
%{
  // Same match rule as `far_cmpF_branch'.
  match(If cmp (CmpF op1 op2));

  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST);
  format %{ "float_b$cmp $op1, $op2, $lbl \t#@cmpF_branch"%}

  ins_encode %{
    __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_class_compare);
  ins_short_branch(1);
%}

// Compare double and branch near instructions
// The double_branch_mask bit selects the double-precision compare.
instruct cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
%{
  // Same match rule as `far_cmpD_branch'.
  match(If cmp (CmpD op1 op2));
  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST);
  format %{ "double_b$cmp $op1, $op2, $lbl\t#@cmpD_branch"%}

  ins_encode %{
    __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
                        as_FloatRegister($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_class_compare);
  ins_short_branch(1);
%}
9646
// Compare signed int with zero and branch near instructions
// Compares directly against the hardwired zero register.
instruct cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpI_reg_imm0_branch'.
  match(If cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp $op1, zr, $lbl\t#@cmpI_reg_imm0_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}

// Loop-back-edge variant of cmpI_reg_imm0_branch (CountedLoopEnd).
instruct cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpI_reg_imm0_loop'.
  match(CountedLoopEnd cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpI_reg_imm0_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}

// Compare unsigned int with zero and branch near instructions
// Only eq/ne/le/gt make sense unsigned against zero (lt is always false,
// ge always true), hence the restricted cmpOpUEqNeLeGt operand.
instruct cmpUEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpUEqNeLeGt_reg_imm0_branch'.
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpUEqNeLeGt_reg_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}
9704
// Compare signed long with zero and branch near instructions
instruct cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  // Same match rule as `far_cmpL_reg_imm0_branch'.
  match(If cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpL_reg_imm0_branch" %}

  ins_encode %{
    // Longs are full-width registers on RV64, so the int zero-compare
    // encoding applies unchanged.
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}

// Counted-loop back-edge variant of the rule above.
instruct cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  // Same match rule as `far_cmpL_reg_imm0_loop'.
  match(CountedLoopEnd cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpL_reg_imm0_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}
9743
// Compare unsigned long with zero and branch near instructions
// eq/ne/le/gt only; unsigned lt/ge against zero are degenerate and handled
// by far_cmpULLtGe_reg_imm0_branch.
instruct cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
%{
  // Same match rule as `far_cmpULEqNeLeGt_reg_imm0_branch'.
  match(If cmp (CmpUL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpULEqNeLeGt_reg_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}
9763
// Compare pointer with zero and branch near instructions
// Pointer null-checks only ever test eq/ne, hence the cmpOpEqNe operand.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
  // Same match rule as `far_cmpP_reg_imm0_branch'.
  match(If cmp (CmpP op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp $op1, zr, $lbl\t#@cmpP_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}

// Compare narrow pointer with zero and branch near instructions
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
  // Same match rule as `far_cmpN_reg_imm0_branch'.
  match(If cmp (CmpN op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpN_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}

// Compare narrow pointer with pointer zero and branch near instructions
// A narrow oop is null iff its decoded form is null, so the DecodeN can be
// elided and the compressed value compared against zero directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
  // Same match rule as `far_cmpP_narrowOop_imm0_branch'.
  match(If cmp (CmpP (DecodeN op1) zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp $op1, zr, $lbl\t#@cmpP_narrowOop_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}
9815
// Patterns for far (20KiB) variants

// Branch on a materialized flag value: taken iff the flag register is
// non-zero (eq/ne test against zero).
instruct far_cmpFlag_branch(cmpOp cmp, rFlagsReg cr, label lbl) %{
  match(If cmp cr);
  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "far_b$cmp $cr, zr, $lbl\t#@far_cmpFlag_branch"%}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
9831
// Compare signed int and branch far instructions
instruct far_cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  // The single [far_b$cmp] shown in the format actually expands to two
  // instructions in the macro assembler:
  //   b$not_cmp(op1, op2, done); j($lbl); bind(done)
  // i.e. a short branch over an unconditional far jump.
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpI_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9849
// Counted-loop back-edge form of far_cmpI_branch.
instruct far_cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpI_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}

// Compare unsigned int and branch far; unsigned_branch_mask selects the
// unsigned branch encodings (bltu/bgeu family) in cmp_branch.
instruct far_cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpU_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9878
// Compare signed long and branch far.
instruct far_cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9892
// Counted-loop back-edge form of far_cmpL_branch.
// Renamed from `far_cmpLloop' to `far_cmpL_loop' for consistency with
// far_cmpI_loop and with this rule's own format tag; the name is local to
// this file (the generated MachNode class name follows automatically).
instruct far_cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
  match(CountedLoopEnd cmp (CmpL op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpL_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9906
// Compare unsigned long and branch far.
instruct far_cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpUL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9921
// Compare pointer and branch far; pointer comparisons are unsigned.
instruct far_cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
%{
  match(If cmp (CmpP op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpP_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}

// Compare narrow pointer and branch far; also an unsigned comparison.
instruct far_cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
%{
  match(If cmp (CmpN op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpN_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9957
// Float compare and branch instructions
// The FP compare result lands in a GPR first (hence the extra XFER_COST).
instruct far_cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
%{
  match(If cmp (CmpF op1 op2));

  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST * 2);
  format %{ "far_float_b$cmp $op1, $op2, $lbl\t#@far_cmpF_branch"%}

  ins_encode %{
    __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare and branch instructions
// double_branch_mask tells float_cmp_branch to use the double-precision
// compare encodings.
instruct far_cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
%{
  match(If cmp (CmpD op1 op2));
  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST * 2);
  format %{ "far_double_b$cmp $op1, $op2, $lbl\t#@far_cmpD_branch"%}

  ins_encode %{
    __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
                        as_FloatRegister($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_class_compare);
%}
9992
// Far form of cmpI_reg_imm0_branch (same match rule, no ins_short_branch).
instruct far_cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpI_reg_imm0_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Far form of cmpI_reg_imm0_loop.
instruct far_cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(CountedLoopEnd cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpI_reg_imm0_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10026
// Far form of cmpUEqNeLeGt_reg_imm0_branch (same match rule).
instruct far_cmpUEqNeLeGt_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpUEqNeLeGt_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Unsigned lt/ge compares against zero have no short-branch instruct with
// the same match rule: `u < 0' is always false and `u >= 0' always true,
// so this degenerates to a fall-through or an unconditional jump (note the
// single-jump BRANCH_COST and the format below).
instruct far_cmpULtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "j $lbl if $cmp == ge\t#@far_cmpULtGe_reg_imm0_branch" %}

  ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));

  ins_pipe(pipe_cmpz_branch);
%}
10059
// Far form of cmpL_reg_imm0_branch.
instruct far_cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpL_reg_imm0_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Far form of cmpL_reg_imm0_loop.
instruct far_cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(CountedLoopEnd cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpL_reg_imm0_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10093
// Far form of cmpULEqNeLeGt_reg_imm0_branch (same match rule).
instruct far_cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpUL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpULEqNeLeGt_reg_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Unsigned long lt/ge against zero: degenerate comparison (always false /
// always true), so no short-branch variant exists; emits at most one jump.
instruct far_cmpULLtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpUL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "j $lbl if $cmp == ge\t#@far_cmpULLtGe_reg_imm0_branch" %}

  ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));

  ins_pipe(pipe_cmpz_branch);
%}
10126
// Far form of cmpP_imm0_branch (pointer null-check, eq/ne only).
instruct far_cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpP_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Far form of cmpN_imm0_branch.
instruct far_cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
  match(If cmp (CmpN op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpN_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Far form of cmpP_narrowOop_imm0_branch: null-check the compressed value
// directly, eliding the DecodeN.
instruct far_cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
  match(If cmp (CmpP (DecodeN op1) zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpP_narrowOop_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10169
// ============================================================================
// Conditional Move Instructions
//
// Each rule selects between keeping `dst' and moving `src' into it, driven
// by a compare of op1/op2. The enc_cmove* helpers live in the RISC-V
// C2_MacroAssembler; presumably they branch around a register move — confirm
// there. Naming: cmov<dst type>_cmp<compare type>.

// --------- CMoveI ---------

// CMoveI selected by a signed int compare.
instruct cmovI_cmpI(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveI (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpI\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveI selected by an unsigned int compare.
instruct cmovI_cmpU(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveI (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpU\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveI selected by a signed long compare.
instruct cmovI_cmpL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveI (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveI selected by an unsigned long compare.
instruct cmovI_cmpUL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveI (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpUL\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveI selected by a float compare (is_single refers to the compare width).
instruct cmovI_cmpF(iRegINoSp dst, iRegI src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveI (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpF\n\t"
  %}

  ins_encode %{
    __ enc_cmove_cmp_fp($cop$$cmpcode,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveI selected by a double compare.
instruct cmovI_cmpD(iRegINoSp dst, iRegI src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveI (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpD\n\t"
  %}

  ins_encode %{
    __ enc_cmove_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveI selected by a narrow-oop compare (unsigned).
instruct cmovI_cmpN(iRegINoSp dst, iRegI src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveI (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpN\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveI selected by a pointer compare (unsigned).
instruct cmovI_cmpP(iRegINoSp dst, iRegI src, iRegP op1, iRegP op2, cmpOpU cop) %{
  match(Set dst (CMoveI (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpP\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10310
// --------- CMoveL ---------

// CMoveL selected by a signed long compare.
instruct cmovL_cmpL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveL selected by an unsigned long compare.
instruct cmovL_cmpUL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpUL\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveL selected by a signed int compare.
instruct cmovL_cmpI(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpI\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveL selected by an unsigned int compare.
instruct cmovL_cmpU(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpU\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveL selected by a float compare.
instruct cmovL_cmpF(iRegLNoSp dst, iRegL src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpF\n\t"
  %}

  ins_encode %{
    __ enc_cmove_cmp_fp($cop$$cmpcode,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveL selected by a double compare.
instruct cmovL_cmpD(iRegLNoSp dst, iRegL src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpD\n\t"
  %}

  ins_encode %{
    __ enc_cmove_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveL selected by a narrow-oop compare (unsigned).
instruct cmovL_cmpN(iRegLNoSp dst, iRegL src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpN\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveL selected by a pointer compare (unsigned).
instruct cmovL_cmpP(iRegLNoSp dst, iRegL src, iRegP op1, iRegP op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpP\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10448
// --------- CMoveF ---------
// In enc_cmove_fp_cmp the trailing is_single flag describes the FP move
// width (dst/src), not the compare width — the compare operands are GPRs.

// CMoveF selected by a signed int compare.
instruct cmovF_cmpI(fRegF dst, fRegF src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpI\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveF selected by an unsigned int compare.
instruct cmovF_cmpU(fRegF dst, fRegF src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpU\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveF selected by a signed long compare.
instruct cmovF_cmpL(fRegF dst, fRegF src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10501
// CMoveF selected by an unsigned long compare.
instruct cmovF_cmpUL(fRegF dst, fRegF src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpUL\n\t"
  %}

  ins_encode %{
    // is_single == true: the conditional move is single-precision (fRegF dst).
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10518
// CMoveF selected by a float compare. enc_cmove_fp_cmp_fp takes separate
// width flags for the compare (cmp_single) and the move (cmov_single).
instruct cmovF_cmpF(fRegF dst, fRegF src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpF\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode,
                           as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                           as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                           true /* cmp_single */, true /* cmov_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveF selected by a double compare.
instruct cmovF_cmpD(fRegF dst, fRegF src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpD\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                           as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                           as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                           false /* cmp_single */, true /* cmov_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10554
// CMoveF selected by a narrow-oop compare.
// Fix: compressed-pointer compares are unsigned, so use the cmpOpU operand
// (matching cmovI_cmpN / cmovL_cmpN); the previous cmpOp operand was
// inconsistent with the unsigned_branch_mask applied in the encoding.
instruct cmovF_cmpN(fRegF dst, fRegF src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpN\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10571
// CMoveF selected by a pointer compare.
// Fix: pointer compares are unsigned, so use the cmpOpU operand (matching
// cmovI_cmpP / cmovL_cmpP); the previous cmpOp operand was inconsistent
// with the unsigned_branch_mask applied in the encoding.
instruct cmovF_cmpP(fRegF dst, fRegF src, iRegP op1, iRegP op2, cmpOpU cop) %{
  match(Set dst (CMoveF (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpP\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10588
// --------- CMoveD ---------
// Same structure as CMoveF; is_single == false selects the double-precision
// move for the fRegD destination.

// CMoveD selected by a signed int compare.
instruct cmovD_cmpI(fRegD dst, fRegD src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpI\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveD selected by an unsigned int compare.
instruct cmovD_cmpU(fRegD dst, fRegD src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpU\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveD selected by a signed long compare.
instruct cmovD_cmpL(fRegD dst, fRegD src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveD selected by an unsigned long compare.
instruct cmovD_cmpUL(fRegD dst, fRegD src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpUL\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10658
// CMoveD selected by a float compare (single-precision compare,
// double-precision conditional move).
instruct cmovD_cmpF(fRegD dst, fRegD src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpF\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode,
                           as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                           as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                           true /* cmp_single */, false /* cmov_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

// CMoveD selected by a double compare.
instruct cmovD_cmpD(fRegD dst, fRegD src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpD\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                           as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                           as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
                           false /* cmp_single */, false /* cmov_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10694
// CMoveD selected by a narrow-oop compare.
// Fix: compressed-pointer compares are unsigned, so use the cmpOpU operand
// (matching cmovI_cmpN / cmovL_cmpN); the previous cmpOp operand was
// inconsistent with the unsigned_branch_mask applied in the encoding.
instruct cmovD_cmpN(fRegD dst, fRegD src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveD (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpN\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10711
// Conditional move of a double based on a pointer comparison of $op1 and
// $op2 (CMoveD over CmpP).  Pointer compares are unsigned, hence the
// unsigned_branch_mask.
instruct cmovD_cmpP(fRegD dst, fRegD src, iRegP op1, iRegP op2, cmpOp cop) %{
  match(Set dst (CMoveD (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpP\n\t"
  %}

  ins_encode %{
    __ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                        as_Register($op1$$reg), as_Register($op2$$reg),
                        as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}
10728
10729 // ============================================================================
10730 // Procedure Call/Return Instructions
10731
10732 // Call Java Static Instruction
10733 // Note: If this code changes, the corresponding ret_addr_offset() and
10734 // compute_padding() functions will have to be adjusted.
// Direct call to a statically-bound Java method ($meth is the method
// constant).  Emits the static-call stub via riscv_enc_java_static_call
// followed by the call epilog; 4-byte aligned so the patchable call site
// has a stable layout (see the ret_addr_offset()/compute_padding() note above).
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL,static $meth\t#@CallStaticJavaDirect" %}

  ins_encode(riscv_enc_java_static_call(meth),
             riscv_enc_call_epilog);

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10751
10752 // TO HERE
10753
10754 // Call Java Dynamic Instruction
10755 // Note: If this code changes, the corresponding ret_addr_offset() and
10756 // compute_padding() functions will have to be adjusted.
// Dynamically-dispatched Java call (inline-cache based).  Costlier than the
// static form because the encoding includes the inline-cache setup
// (BRANCH_COST + ALU_COST * 5); 4-byte aligned for patchability.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(BRANCH_COST + ALU_COST * 5);

  format %{ "CALL,dynamic $meth\t#@CallDynamicJavaDirect" %}

  ins_encode(riscv_enc_java_dynamic_call(meth),
             riscv_enc_call_epilog);

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10773
10774 // Call Runtime Instruction
10775
// Call from compiled Java code into the VM runtime (CallRuntime node).
// Uses the generic java-to-runtime encoding; no call epilog is needed here.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime $meth\t#@CallRuntimeDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10791
// Call Runtime Leaf Instruction (no safepoint)
10793
// Call to a runtime leaf routine (CallLeaf node: no safepoint, no Java
// frame walking).  Shares the java-to-runtime encoding with CallRuntimeDirect.
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime leaf $meth\t#@CallLeafDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10809
10810 // Call Runtime Instruction without safepoint and with vector arguments
10811
// Call to a runtime leaf routine that takes vector arguments (CallLeafVector
// node: no safepoint).  Same encoding as the other leaf calls.
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  // Tag the disassembly with the instruct name, consistent with every
  // sibling call instruct (CallLeafDirect, CallLeafNoFPDirect, ...).
  format %{ "CALL, runtime leaf vector $meth\t#@CallLeafDirectVector" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10827
// Call Runtime Leaf Instruction without floating-point arguments
10829
// Call to a runtime leaf routine that does not use floating-point registers
// (CallLeafNoFP node).  Same encoding as the other leaf calls.
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime leaf nofp $meth\t#@CallLeafNoFPDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10845
10846 // ============================================================================
10847 // Partial Subtype Check
10848 //
// Search the secondary-supers (superklass) array for an instance of the superklass.  Set a hidden
10850 // internal cache on a hit (cache is checked with exposed code in
10851 // gen_subtype_check()). Return zero for a hit. The encoding
10852 // ALSO sets flags.
10853
// Classic (linear-scan) partial subtype check, used only when the
// secondary-supers hash table is disabled.  Registers are pinned (R10/R12/
// R14/R15) to match the fixed-register calling convention of the encoding.
instruct partialSubtypeCheck(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp, rFlagsReg cr)
%{
  predicate(!UseSecondarySupersTable);
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp, KILL cr);

  ins_cost(20 * DEFAULT_COST);
  format %{ "partialSubtypeCheck $result, $sub, $super\t#@partialSubtypeCheck" %}

  ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
10869
10870 // Two versions of partialSubtypeCheck, both used when we need to
10871 // search for a super class in the secondary supers array. The first
10872 // is used when we don't know _a priori_ the class being searched
10873 // for. The second, far more common, is used when we do know: this is
10874 // used for instanceof, checkcast, and any case where C2 can determine
10875 // it by constant propagation.
10876
// Hash-table based subtype check for a super class NOT known at compile
// time (variable super).  Slightly costlier than the constant-super version
// below so the matcher prefers that one when the super is a constant.
instruct partialSubtypeCheckVarSuper(iRegP_R14 sub, iRegP_R10 super, iRegP_R15 result,
                                     iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13,
                                     iRegP_R16 tmpR16, rFlagsReg cr)
%{
  predicate(UseSecondarySupersTable);
  match(Set result (PartialSubtypeCheck sub super));
  effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);

  ins_cost(10 * DEFAULT_COST); // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    // nullptr L_success: fall through on success rather than branching away.
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register, $result$$Register,
                                         $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
                                         $tmpR16$$Register, nullptr /*L_success*/);
  %}

  ins_pipe(pipe_class_memory);
%}
10896
// Hash-table based subtype check for a super class KNOWN at compile time
// (constant super) — the common case for instanceof/checkcast.  Either
// inlines the table lookup or emits a call to the per-slot stub; both paths
// can fail if the code cache is full, which is reported and bails out.
instruct partialSubtypeCheckConstSuper(iRegP_R14 sub, iRegP_R10 super_reg, immP super_con, iRegP_R15 result,
                                       iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13, iRegP_R16 tmpR16, rFlagsReg cr)
%{
  predicate(UseSecondarySupersTable);
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);

  ins_cost(5 * DEFAULT_COST); // needs to be less than competing nodes
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    // The hash slot is derived from the constant super klass at compile time.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success = __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register, $result$$Register,
                                                       $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
                                                       $tmpR16$$Register, super_klass_slot);
    } else {
      address call = __ reloc_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
10926
// Scalar (non-RVV) intrinsic for String.compareTo when both strings are
// UTF-16 (UU encoding).  Fixed registers match the C2_MacroAssembler
// string_compare calling convention; all inputs and temps are clobbered.
instruct string_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                         iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
10944
// Scalar (non-RVV) intrinsic for String.compareTo when both strings are
// Latin-1 (LL encoding).
instruct string_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                         iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
10961
// Scalar (non-RVV) intrinsic for String.compareTo with mixed encodings:
// str1 is UTF-16, str2 is Latin-1 (UL encoding).
instruct string_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Space after "%{" restored for consistency with the sibling format clauses.
  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
10978
// Scalar (non-RVV) intrinsic for String.compareTo with mixed encodings:
// str1 is Latin-1, str2 is UTF-16 (LU encoding).
instruct string_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3,
                          rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
10996
// String.indexOf intrinsic, both strings UTF-16 (UU), pattern length known
// only at runtime.  Result is TEMP_DEF so it may be written before the
// inputs are fully consumed.
instruct string_indexofUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}
  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
11017
// String.indexOf intrinsic, both strings Latin-1 (LL), pattern length known
// only at runtime.
instruct string_indexofLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}
  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
11038
// String.indexOf intrinsic, UTF-16 source with Latin-1 pattern (UL),
// pattern length known only at runtime.
instruct string_indexofUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
11059
// String.indexOf with a small compile-time-constant pattern length
// (immI_le_4), both strings UTF-16.  Uses the linear-scan variant; the
// pattern count register is replaced by zr since the count is an immediate.
instruct string_indexof_conUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
11081
// String.indexOf with a small compile-time-constant pattern length
// (immI_le_4), both strings Latin-1.
instruct string_indexof_conLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}
  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
11102
// String.indexOf with a single-char constant pattern (immI_1), UTF-16
// source with Latin-1 pattern (UL).  Note: only pattern length 1 is
// accepted here, unlike the UU/LL constant variants (length <= 4).
instruct string_indexof_conUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_1 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}
  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
11123
// StringUTF16.indexOf(char) intrinsic (scalar, non-RVV).  The final boolean
// of string_indexof_char is isL ("is Latin-1"): false here because this is
// the UTF-16 variant (compare the sibling stringL_indexof_char, which
// passes true).
instruct stringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
                              iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
11141
11142
// StringLatin1.indexOf(char) intrinsic (scalar, non-RVV); isL = true selects
// the Latin-1 (byte element) path in string_indexof_char.
instruct stringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
                              iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
11160
11161 // clearing of an array
// Zero an array region of $cnt words starting at $base (ClearArray node,
// variable count).  zero_words may emit a call to the zero-blocks stub and
// returns nullptr when the code cache is full, in which case compilation
// bails out.
instruct clearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, iRegP_R30 tmp1,
                            iRegP_R31 tmp2, rFlagsReg cr, Universe dummy)
%{
  // temp registers must match the one used in StubGenerator::generate_zero_blocks()
  predicate(UseBlockZeroing || !UseRVV);
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, TEMP tmp1, TEMP tmp2, KILL cr);

  ins_cost(4 * DEFAULT_COST);
  format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %}

  ins_encode %{
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
11183
// Zero an array region whose word count is a compile-time constant below
// the block-zeroing threshold; emits inline stores rather than the stub.
instruct clearArray_imm_reg(immL cnt, iRegP_R28 base, Universe dummy, rFlagsReg cr)
%{
  // Only for small constant counts: (cnt in words) < BlockZeroingLowLimit (in bytes,
  // converted to words via LogBytesPerWord).
  predicate(!UseRVV && (uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base, KILL cr);

  ins_cost(4 * DEFAULT_COST);
  format %{ "ClearArray $cnt, $base\t#@clearArray_imm_reg" %}

  ins_encode %{
    __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
11200
// String.equals intrinsic for Latin-1 strings (scalar, non-RVV).
instruct string_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt,
                        iRegI_R10 result, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %}
  ins_encode %{
    // Count is in 8-bit bytes.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
11216
// Arrays.equals intrinsic for byte arrays (LL encoding); the trailing
// constant is the element size in bytes (1 for byte elements — compare
// array_equalsC below, which passes 2).
instruct array_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
                       iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
%{
  predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsB // KILL all" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
11232
// Arrays.equals intrinsic for char arrays (UU encoding); element size 2.
instruct array_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
                       iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
%{
  predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsC // KILL all" %}
  ins_encode %{
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
11248
11249 // fast ArraysSupport.vectorizedHashCode
// fast ArraysSupport.vectorizedHashCode
// Scalar (non-RVV) polynomial hash over $cnt elements of $ary, accumulated
// into $result; $basic_type selects the element type at compile time.
// $result is both an input (initial hash) and the output, per the match rule.
instruct arrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI basic_type,
                         iRegLNoSp tmp1, iRegLNoSp tmp2,
                         iRegLNoSp tmp3, iRegLNoSp tmp4,
                         iRegLNoSp tmp5, iRegLNoSp tmp6, rFlagsReg cr)
%{
  predicate(!UseRVV);
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result // KILL all" %}
  ins_encode %{
    __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                       $tmp4$$Register, $tmp5$$Register, $tmp6$$Register,
                       (BasicType)$basic_type$$constant);
  %}
  ins_pipe(pipe_class_memory);
%}
11269
11270 // ============================================================================
11271 // Safepoint Instructions
11272
// Safepoint poll: a load from the polling page at $poll, discarded into zr.
// The load faults when the page is armed, trapping the thread into the
// safepoint handler (relocInfo::poll_type marks the site for the VM).
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  ins_cost(2 * LOAD_COST);
  format %{
    "lwu zr, [$poll]\t# Safepoint: poll for GC, #@safePoint"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), 0, relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
11286
11287 // ============================================================================
11288 // This name is KNOWN by the ADLC and cannot be changed.
11289 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
11290 // for this guy.
// ThreadLocal: the current JavaThread already lives in the dedicated thread
// register (javaThread_RegP), so this emits no code at all (size 0).
instruct tlsLoadP(javaThread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty, #@tlsLoadP" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
11305
11306 // inlined locking and unlocking
11307 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// Inlined monitor enter (FastLock).  Produces the synthetic flags result
// consumed by the following branch; locking semantics live in
// C2_MacroAssembler::fast_lock.  All four temps are clobbered.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
                     iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);

  ins_cost(10 * DEFAULT_COST);
  format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register,
                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
  %}

  ins_pipe(pipe_serial);
%}
11324
11325 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// Inlined monitor exit (FastUnlock), counterpart of cmpFastLock above;
// unlocking semantics live in C2_MacroAssembler::fast_unlock.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box,
                       iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);

  ins_cost(10 * DEFAULT_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlock" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register,
                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
11342
11343 // Tail Call; Jump from runtime stub to Java code.
11344 // Also known as an 'interprocedural jump'.
11345 // Target of jump will eventually return to caller.
11346 // TailJump below removes the return address.
11347 // Don't use fp for 'jump_target' because a MachEpilogNode has already been
11348 // emitted just above the TailCall which has reset fp to the caller state.
// Indirect tail call: jump through $jump_target with the method oop held in
// the inline-cache register.  jump_target excludes sp and fp (see the
// MachEpilogNode note above this instruct).
instruct TailCalljmpInd(iRegPNoSpNoFp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(BRANCH_COST);

  format %{ "jalr $jump_target\t# $method_oop holds method oop, #@TailCalljmpInd." %}

  ins_encode(riscv_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
11361
// Indirect tail jump carrying an exception oop in R10; unlike TailCall this
// discards the return address (TailJump node).
instruct TailjmpInd(iRegPNoSpNoFp jump_target, iRegP_R10 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(ALU_COST + BRANCH_COST);

  format %{ "jalr $jump_target\t# $ex_oop holds exception oop, #@TailjmpInd." %}

  ins_encode(riscv_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
11374
11375 // Forward exception.
// Forward a pending exception: unconditional far jump to the shared
// forward-exception stub.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);

  ins_cost(BRANCH_COST);

  format %{ "j forward_exception_stub\t#@ForwardException" %}

  ins_encode %{
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}

  ins_pipe(pipe_class_call);
%}
11390
11391 // Create exception oop: created by stack-crawling runtime code.
11392 // Created exception is now available to this handler, and is setup
11393 // just prior to jumping to this handler. No code emitted.
// CreateEx: the stack-crawling runtime has already placed the exception oop
// in R10 before jumping here, so no code is emitted (size 0); this node only
// makes the value visible to the register allocator.
instruct CreateException(iRegP_R10 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  ins_cost(0);
  format %{ " -- \t// exception oop; no code emitted, #@CreateException" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
11407
11408 // Rethrow exception: The exception oop will come in the first
11409 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop arrives in
// the first argument register per the comment above.
instruct RethrowException()
%{
  match(Rethrow);

  ins_cost(BRANCH_COST);

  format %{ "j rethrow_stub\t#@RethrowException" %}

  ins_encode(riscv_enc_rethrow());

  ins_pipe(pipe_class_call);
%}
11422
11423 // Return Instruction
11424 // epilog node loads ret address into ra as part of frame pop
// Method return; the epilog has already restored ra, so this is a bare ret.
instruct Ret()
%{
  match(Return);

  ins_cost(BRANCH_COST);
  format %{ "ret\t// return register, #@Ret" %}

  ins_encode(riscv_enc_ret());

  ins_pipe(pipe_branch);
%}
11436
11437 // Die now.
// Halt node: code that must never execute.  When the block is reachable,
// emits a stop with the recorded halt reason; otherwise emits nothing.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(BRANCH_COST);

  format %{ "#@ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}

  ins_pipe(pipe_class_default);
%}
11454
11455
11456 //----------PEEPHOLE RULES-----------------------------------------------------
11457 // These must follow all instruction definitions as they use the names
11458 // defined in the instructions definitions.
11459 //
11460 // peepmatch ( root_instr_name [preceding_instruction]* );
11461 //
11462 // peepconstraint %{
11463 // (instruction_number.operand_name relational_op instruction_number.operand_name
11464 // [, ...] );
11465 // // instruction numbers are zero-based using left to right order in peepmatch
11466 //
11467 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
11468 // // provide an instruction_number.operand_name for each operand that appears
11469 // // in the replacement instruction's match rule
11470 //
11471 // ---------VM FLAGS---------------------------------------------------------
11472 //
11473 // All peephole optimizations can be turned off using -XX:-OptoPeephole
11474 //
11475 // Each peephole rule is given an identifying number starting with zero and
11476 // increasing by one in the order seen by the parser. An individual peephole
11477 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
11478 // on the command-line.
11479 //
11480 // ---------CURRENT LIMITATIONS----------------------------------------------
11481 //
11482 // Only match adjacent instructions in same basic block
11483 // Only equality constraints
11484 // Only constraints between operands, not (0.dest_reg == RAX_enc)
11485 // Only one replacement instruction
11486 //
11487 //----------SMARTSPILL RULES---------------------------------------------------
11488 // These must follow all instruction definitions as they use the names
11489 // defined in the instructions definitions.
11490
11491 // Local Variables:
11492 // mode: c++
11493 // End: