1 //
2 // Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
4 // Copyright (c) 2020, 2024, Huawei Technologies Co., Ltd. All rights reserved.
5 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 //
7 // This code is free software; you can redistribute it and/or modify it
8 // under the terms of the GNU General Public License version 2 only, as
9 // published by the Free Software Foundation.
10 //
11 // This code is distributed in the hope that it will be useful, but WITHOUT
12 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 // version 2 for more details (a copy is included in the LICENSE file that
15 // accompanied this code).
16 //
17 // You should have received a copy of the GNU General Public License version
18 // 2 along with this work; if not, write to the Free Software Foundation,
19 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 //
21 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 // or visit www.oracle.com if you need additional information or have any
23 // questions.
24 //
25 //
26
27 // RISCV Architecture Description File
28
29 //----------REGISTER DEFINITION BLOCK------------------------------------------
30 // This information is used by the matcher and the register allocator to
31 // describe individual registers and classes of registers within the target
32 // architecture.
33
34 register %{
35 //----------Architecture Description Register Definitions----------------------
36 // General Registers
37 // "reg_def" name ( register save type, C convention save type,
38 // ideal register type, encoding );
39 // Register Save Types:
40 //
41 // NS = No-Save: The register allocator assumes that these registers
42 // can be used without saving upon entry to the method, &
43 // that they do not need to be saved at call sites.
44 //
45 // SOC = Save-On-Call: The register allocator assumes that these registers
46 // can be used without saving upon entry to the method,
47 // but that they must be saved at call sites.
48 //
49 // SOE = Save-On-Entry: The register allocator assumes that these registers
50 // must be saved before using them upon entry to the
51 // method, but they do not need to be saved at call
52 // sites.
53 //
54 // AS = Always-Save: The register allocator assumes that these registers
55 // must be saved before using them upon entry to the
56 // method, & that they must be saved at call sites.
57 //
58 // Ideal Register Type is used to determine how to save & restore a
59 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
60 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
61 //
62 // The encoding number is the actual bit-pattern placed into the opcodes.
63
// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
68 //
69 // follow the C1 compiler in making registers
70 //
71 // x7, x9-x17, x27-x31 volatile (caller save)
72 // x0-x4, x8, x23 system (no save, no allocate)
73 // x5-x6 non-allocatable (so we can use them as temporary regs)
74
75 //
// As regards Java usage, we don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
79 //
80
81 // General Registers
82
83 reg_def R0 ( NS, NS, Op_RegI, 0, x0->as_VMReg() ); // zr
84 reg_def R0_H ( NS, NS, Op_RegI, 0, x0->as_VMReg()->next() );
85 reg_def R1 ( NS, SOC, Op_RegI, 1, x1->as_VMReg() ); // ra
86 reg_def R1_H ( NS, SOC, Op_RegI, 1, x1->as_VMReg()->next() );
87 reg_def R2 ( NS, NS, Op_RegI, 2, x2->as_VMReg() ); // sp
88 reg_def R2_H ( NS, NS, Op_RegI, 2, x2->as_VMReg()->next() );
89 reg_def R3 ( NS, NS, Op_RegI, 3, x3->as_VMReg() ); // gp
90 reg_def R3_H ( NS, NS, Op_RegI, 3, x3->as_VMReg()->next() );
91 reg_def R4 ( NS, NS, Op_RegI, 4, x4->as_VMReg() ); // tp
92 reg_def R4_H ( NS, NS, Op_RegI, 4, x4->as_VMReg()->next() );
93 reg_def R7 ( SOC, SOC, Op_RegI, 7, x7->as_VMReg() );
94 reg_def R7_H ( SOC, SOC, Op_RegI, 7, x7->as_VMReg()->next() );
95 reg_def R8 ( NS, SOE, Op_RegI, 8, x8->as_VMReg() ); // fp
96 reg_def R8_H ( NS, SOE, Op_RegI, 8, x8->as_VMReg()->next() );
97 reg_def R9 ( SOC, SOE, Op_RegI, 9, x9->as_VMReg() );
98 reg_def R9_H ( SOC, SOE, Op_RegI, 9, x9->as_VMReg()->next() );
99 reg_def R10 ( SOC, SOC, Op_RegI, 10, x10->as_VMReg() );
100 reg_def R10_H ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()->next());
101 reg_def R11 ( SOC, SOC, Op_RegI, 11, x11->as_VMReg() );
102 reg_def R11_H ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()->next());
103 reg_def R12 ( SOC, SOC, Op_RegI, 12, x12->as_VMReg() );
104 reg_def R12_H ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()->next());
105 reg_def R13 ( SOC, SOC, Op_RegI, 13, x13->as_VMReg() );
106 reg_def R13_H ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()->next());
107 reg_def R14 ( SOC, SOC, Op_RegI, 14, x14->as_VMReg() );
108 reg_def R14_H ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()->next());
109 reg_def R15 ( SOC, SOC, Op_RegI, 15, x15->as_VMReg() );
110 reg_def R15_H ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()->next());
111 reg_def R16 ( SOC, SOC, Op_RegI, 16, x16->as_VMReg() );
112 reg_def R16_H ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()->next());
113 reg_def R17 ( SOC, SOC, Op_RegI, 17, x17->as_VMReg() );
114 reg_def R17_H ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()->next());
115 reg_def R18 ( SOC, SOE, Op_RegI, 18, x18->as_VMReg() );
116 reg_def R18_H ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()->next());
117 reg_def R19 ( SOC, SOE, Op_RegI, 19, x19->as_VMReg() );
118 reg_def R19_H ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()->next());
119 reg_def R20 ( SOC, SOE, Op_RegI, 20, x20->as_VMReg() ); // caller esp
120 reg_def R20_H ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()->next());
121 reg_def R21 ( SOC, SOE, Op_RegI, 21, x21->as_VMReg() );
122 reg_def R21_H ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()->next());
123 reg_def R22 ( SOC, SOE, Op_RegI, 22, x22->as_VMReg() );
124 reg_def R22_H ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()->next());
125 reg_def R23 ( NS, SOE, Op_RegI, 23, x23->as_VMReg() ); // java thread
126 reg_def R23_H ( NS, SOE, Op_RegI, 23, x23->as_VMReg()->next());
127 reg_def R24 ( SOC, SOE, Op_RegI, 24, x24->as_VMReg() );
128 reg_def R24_H ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()->next());
129 reg_def R25 ( SOC, SOE, Op_RegI, 25, x25->as_VMReg() );
130 reg_def R25_H ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()->next());
131 reg_def R26 ( SOC, SOE, Op_RegI, 26, x26->as_VMReg() );
132 reg_def R26_H ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()->next());
133 reg_def R27 ( SOC, SOE, Op_RegI, 27, x27->as_VMReg() ); // heapbase
134 reg_def R27_H ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()->next());
135 reg_def R28 ( SOC, SOC, Op_RegI, 28, x28->as_VMReg() );
136 reg_def R28_H ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()->next());
137 reg_def R29 ( SOC, SOC, Op_RegI, 29, x29->as_VMReg() );
138 reg_def R29_H ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()->next());
139 reg_def R30 ( SOC, SOC, Op_RegI, 30, x30->as_VMReg() );
140 reg_def R30_H ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()->next());
141 reg_def R31 ( SOC, SOC, Op_RegI, 31, x31->as_VMReg() );
142 reg_def R31_H ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()->next());
143
144 // ----------------------------
145 // Float/Double Registers
146 // ----------------------------
147
148 // Double Registers
149
150 // The rules of ADL require that double registers be defined in pairs.
151 // Each pair must be two 32-bit values, but not necessarily a pair of
152 // single float registers. In each pair, ADLC-assigned register numbers
153 // must be adjacent, with the lower number even. Finally, when the
154 // CPU stores such a register pair to memory, the word associated with
155 // the lower ADLC-assigned number must be stored to the lower address.
156
157 // RISCV has 32 floating-point registers. Each can store a single
158 // or double precision floating-point value.
159
// For Java use, float registers f0-f31 are always save-on-call, whereas
// the platform ABI treats f8-f9 and f18-f27 as callee save. Other
// float registers are SOC as per the platform spec.
163
164 reg_def F0 ( SOC, SOC, Op_RegF, 0, f0->as_VMReg() );
165 reg_def F0_H ( SOC, SOC, Op_RegF, 0, f0->as_VMReg()->next() );
166 reg_def F1 ( SOC, SOC, Op_RegF, 1, f1->as_VMReg() );
167 reg_def F1_H ( SOC, SOC, Op_RegF, 1, f1->as_VMReg()->next() );
168 reg_def F2 ( SOC, SOC, Op_RegF, 2, f2->as_VMReg() );
169 reg_def F2_H ( SOC, SOC, Op_RegF, 2, f2->as_VMReg()->next() );
170 reg_def F3 ( SOC, SOC, Op_RegF, 3, f3->as_VMReg() );
171 reg_def F3_H ( SOC, SOC, Op_RegF, 3, f3->as_VMReg()->next() );
172 reg_def F4 ( SOC, SOC, Op_RegF, 4, f4->as_VMReg() );
173 reg_def F4_H ( SOC, SOC, Op_RegF, 4, f4->as_VMReg()->next() );
174 reg_def F5 ( SOC, SOC, Op_RegF, 5, f5->as_VMReg() );
175 reg_def F5_H ( SOC, SOC, Op_RegF, 5, f5->as_VMReg()->next() );
176 reg_def F6 ( SOC, SOC, Op_RegF, 6, f6->as_VMReg() );
177 reg_def F6_H ( SOC, SOC, Op_RegF, 6, f6->as_VMReg()->next() );
178 reg_def F7 ( SOC, SOC, Op_RegF, 7, f7->as_VMReg() );
179 reg_def F7_H ( SOC, SOC, Op_RegF, 7, f7->as_VMReg()->next() );
180 reg_def F8 ( SOC, SOE, Op_RegF, 8, f8->as_VMReg() );
181 reg_def F8_H ( SOC, SOE, Op_RegF, 8, f8->as_VMReg()->next() );
182 reg_def F9 ( SOC, SOE, Op_RegF, 9, f9->as_VMReg() );
183 reg_def F9_H ( SOC, SOE, Op_RegF, 9, f9->as_VMReg()->next() );
184 reg_def F10 ( SOC, SOC, Op_RegF, 10, f10->as_VMReg() );
185 reg_def F10_H ( SOC, SOC, Op_RegF, 10, f10->as_VMReg()->next() );
186 reg_def F11 ( SOC, SOC, Op_RegF, 11, f11->as_VMReg() );
187 reg_def F11_H ( SOC, SOC, Op_RegF, 11, f11->as_VMReg()->next() );
188 reg_def F12 ( SOC, SOC, Op_RegF, 12, f12->as_VMReg() );
189 reg_def F12_H ( SOC, SOC, Op_RegF, 12, f12->as_VMReg()->next() );
190 reg_def F13 ( SOC, SOC, Op_RegF, 13, f13->as_VMReg() );
191 reg_def F13_H ( SOC, SOC, Op_RegF, 13, f13->as_VMReg()->next() );
192 reg_def F14 ( SOC, SOC, Op_RegF, 14, f14->as_VMReg() );
193 reg_def F14_H ( SOC, SOC, Op_RegF, 14, f14->as_VMReg()->next() );
194 reg_def F15 ( SOC, SOC, Op_RegF, 15, f15->as_VMReg() );
195 reg_def F15_H ( SOC, SOC, Op_RegF, 15, f15->as_VMReg()->next() );
196 reg_def F16 ( SOC, SOC, Op_RegF, 16, f16->as_VMReg() );
197 reg_def F16_H ( SOC, SOC, Op_RegF, 16, f16->as_VMReg()->next() );
198 reg_def F17 ( SOC, SOC, Op_RegF, 17, f17->as_VMReg() );
199 reg_def F17_H ( SOC, SOC, Op_RegF, 17, f17->as_VMReg()->next() );
200 reg_def F18 ( SOC, SOE, Op_RegF, 18, f18->as_VMReg() );
201 reg_def F18_H ( SOC, SOE, Op_RegF, 18, f18->as_VMReg()->next() );
202 reg_def F19 ( SOC, SOE, Op_RegF, 19, f19->as_VMReg() );
203 reg_def F19_H ( SOC, SOE, Op_RegF, 19, f19->as_VMReg()->next() );
204 reg_def F20 ( SOC, SOE, Op_RegF, 20, f20->as_VMReg() );
205 reg_def F20_H ( SOC, SOE, Op_RegF, 20, f20->as_VMReg()->next() );
206 reg_def F21 ( SOC, SOE, Op_RegF, 21, f21->as_VMReg() );
207 reg_def F21_H ( SOC, SOE, Op_RegF, 21, f21->as_VMReg()->next() );
208 reg_def F22 ( SOC, SOE, Op_RegF, 22, f22->as_VMReg() );
209 reg_def F22_H ( SOC, SOE, Op_RegF, 22, f22->as_VMReg()->next() );
210 reg_def F23 ( SOC, SOE, Op_RegF, 23, f23->as_VMReg() );
211 reg_def F23_H ( SOC, SOE, Op_RegF, 23, f23->as_VMReg()->next() );
212 reg_def F24 ( SOC, SOE, Op_RegF, 24, f24->as_VMReg() );
213 reg_def F24_H ( SOC, SOE, Op_RegF, 24, f24->as_VMReg()->next() );
214 reg_def F25 ( SOC, SOE, Op_RegF, 25, f25->as_VMReg() );
215 reg_def F25_H ( SOC, SOE, Op_RegF, 25, f25->as_VMReg()->next() );
216 reg_def F26 ( SOC, SOE, Op_RegF, 26, f26->as_VMReg() );
217 reg_def F26_H ( SOC, SOE, Op_RegF, 26, f26->as_VMReg()->next() );
218 reg_def F27 ( SOC, SOE, Op_RegF, 27, f27->as_VMReg() );
219 reg_def F27_H ( SOC, SOE, Op_RegF, 27, f27->as_VMReg()->next() );
220 reg_def F28 ( SOC, SOC, Op_RegF, 28, f28->as_VMReg() );
221 reg_def F28_H ( SOC, SOC, Op_RegF, 28, f28->as_VMReg()->next() );
222 reg_def F29 ( SOC, SOC, Op_RegF, 29, f29->as_VMReg() );
223 reg_def F29_H ( SOC, SOC, Op_RegF, 29, f29->as_VMReg()->next() );
224 reg_def F30 ( SOC, SOC, Op_RegF, 30, f30->as_VMReg() );
225 reg_def F30_H ( SOC, SOC, Op_RegF, 30, f30->as_VMReg()->next() );
226 reg_def F31 ( SOC, SOC, Op_RegF, 31, f31->as_VMReg() );
227 reg_def F31_H ( SOC, SOC, Op_RegF, 31, f31->as_VMReg()->next() );
228
229 // ----------------------------
230 // Vector Registers
231 // ----------------------------
232
233 // For RVV vector registers, we simply extend vector register size to 4
234 // 'logical' slots. This is nominally 128 bits but it actually covers
235 // all possible 'physical' RVV vector register lengths from 128 ~ 1024
236 // bits. The 'physical' RVV vector register length is detected during
237 // startup, so the register allocator is able to identify the correct
238 // number of bytes needed for an RVV spill/unspill.
239
240 reg_def V0 ( SOC, SOC, Op_VecA, 0, v0->as_VMReg() );
241 reg_def V0_H ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next() );
242 reg_def V0_J ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next(2) );
243 reg_def V0_K ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next(3) );
244
245 reg_def V1 ( SOC, SOC, Op_VecA, 1, v1->as_VMReg() );
246 reg_def V1_H ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next() );
247 reg_def V1_J ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next(2) );
248 reg_def V1_K ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next(3) );
249
250 reg_def V2 ( SOC, SOC, Op_VecA, 2, v2->as_VMReg() );
251 reg_def V2_H ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next() );
252 reg_def V2_J ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next(2) );
253 reg_def V2_K ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next(3) );
254
255 reg_def V3 ( SOC, SOC, Op_VecA, 3, v3->as_VMReg() );
256 reg_def V3_H ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next() );
257 reg_def V3_J ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next(2) );
258 reg_def V3_K ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next(3) );
259
260 reg_def V4 ( SOC, SOC, Op_VecA, 4, v4->as_VMReg() );
261 reg_def V4_H ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next() );
262 reg_def V4_J ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next(2) );
263 reg_def V4_K ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next(3) );
264
265 reg_def V5 ( SOC, SOC, Op_VecA, 5, v5->as_VMReg() );
266 reg_def V5_H ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next() );
267 reg_def V5_J ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next(2) );
268 reg_def V5_K ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next(3) );
269
270 reg_def V6 ( SOC, SOC, Op_VecA, 6, v6->as_VMReg() );
271 reg_def V6_H ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next() );
272 reg_def V6_J ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next(2) );
273 reg_def V6_K ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next(3) );
274
275 reg_def V7 ( SOC, SOC, Op_VecA, 7, v7->as_VMReg() );
276 reg_def V7_H ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next() );
277 reg_def V7_J ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next(2) );
278 reg_def V7_K ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next(3) );
279
280 reg_def V8 ( SOC, SOC, Op_VecA, 8, v8->as_VMReg() );
281 reg_def V8_H ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next() );
282 reg_def V8_J ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next(2) );
283 reg_def V8_K ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next(3) );
284
285 reg_def V9 ( SOC, SOC, Op_VecA, 9, v9->as_VMReg() );
286 reg_def V9_H ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next() );
287 reg_def V9_J ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next(2) );
288 reg_def V9_K ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next(3) );
289
290 reg_def V10 ( SOC, SOC, Op_VecA, 10, v10->as_VMReg() );
291 reg_def V10_H ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next() );
292 reg_def V10_J ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(2) );
293 reg_def V10_K ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(3) );
294
295 reg_def V11 ( SOC, SOC, Op_VecA, 11, v11->as_VMReg() );
296 reg_def V11_H ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next() );
297 reg_def V11_J ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(2) );
298 reg_def V11_K ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(3) );
299
300 reg_def V12 ( SOC, SOC, Op_VecA, 12, v12->as_VMReg() );
301 reg_def V12_H ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next() );
302 reg_def V12_J ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(2) );
303 reg_def V12_K ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(3) );
304
305 reg_def V13 ( SOC, SOC, Op_VecA, 13, v13->as_VMReg() );
306 reg_def V13_H ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next() );
307 reg_def V13_J ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(2) );
308 reg_def V13_K ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(3) );
309
310 reg_def V14 ( SOC, SOC, Op_VecA, 14, v14->as_VMReg() );
311 reg_def V14_H ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next() );
312 reg_def V14_J ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(2) );
313 reg_def V14_K ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(3) );
314
315 reg_def V15 ( SOC, SOC, Op_VecA, 15, v15->as_VMReg() );
316 reg_def V15_H ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next() );
317 reg_def V15_J ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(2) );
318 reg_def V15_K ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(3) );
319
320 reg_def V16 ( SOC, SOC, Op_VecA, 16, v16->as_VMReg() );
321 reg_def V16_H ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next() );
322 reg_def V16_J ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(2) );
323 reg_def V16_K ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(3) );
324
325 reg_def V17 ( SOC, SOC, Op_VecA, 17, v17->as_VMReg() );
326 reg_def V17_H ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next() );
327 reg_def V17_J ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(2) );
328 reg_def V17_K ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(3) );
329
330 reg_def V18 ( SOC, SOC, Op_VecA, 18, v18->as_VMReg() );
331 reg_def V18_H ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next() );
332 reg_def V18_J ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(2) );
333 reg_def V18_K ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(3) );
334
335 reg_def V19 ( SOC, SOC, Op_VecA, 19, v19->as_VMReg() );
336 reg_def V19_H ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next() );
337 reg_def V19_J ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(2) );
338 reg_def V19_K ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(3) );
339
340 reg_def V20 ( SOC, SOC, Op_VecA, 20, v20->as_VMReg() );
341 reg_def V20_H ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next() );
342 reg_def V20_J ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(2) );
343 reg_def V20_K ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(3) );
344
345 reg_def V21 ( SOC, SOC, Op_VecA, 21, v21->as_VMReg() );
346 reg_def V21_H ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next() );
347 reg_def V21_J ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(2) );
348 reg_def V21_K ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(3) );
349
350 reg_def V22 ( SOC, SOC, Op_VecA, 22, v22->as_VMReg() );
351 reg_def V22_H ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next() );
352 reg_def V22_J ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(2) );
353 reg_def V22_K ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(3) );
354
355 reg_def V23 ( SOC, SOC, Op_VecA, 23, v23->as_VMReg() );
356 reg_def V23_H ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next() );
357 reg_def V23_J ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(2) );
358 reg_def V23_K ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(3) );
359
360 reg_def V24 ( SOC, SOC, Op_VecA, 24, v24->as_VMReg() );
361 reg_def V24_H ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next() );
362 reg_def V24_J ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(2) );
363 reg_def V24_K ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(3) );
364
365 reg_def V25 ( SOC, SOC, Op_VecA, 25, v25->as_VMReg() );
366 reg_def V25_H ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next() );
367 reg_def V25_J ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(2) );
368 reg_def V25_K ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(3) );
369
370 reg_def V26 ( SOC, SOC, Op_VecA, 26, v26->as_VMReg() );
371 reg_def V26_H ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next() );
372 reg_def V26_J ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(2) );
373 reg_def V26_K ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(3) );
374
375 reg_def V27 ( SOC, SOC, Op_VecA, 27, v27->as_VMReg() );
376 reg_def V27_H ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next() );
377 reg_def V27_J ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(2) );
378 reg_def V27_K ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(3) );
379
380 reg_def V28 ( SOC, SOC, Op_VecA, 28, v28->as_VMReg() );
381 reg_def V28_H ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next() );
382 reg_def V28_J ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(2) );
383 reg_def V28_K ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(3) );
384
385 reg_def V29 ( SOC, SOC, Op_VecA, 29, v29->as_VMReg() );
386 reg_def V29_H ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next() );
387 reg_def V29_J ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(2) );
388 reg_def V29_K ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(3) );
389
390 reg_def V30 ( SOC, SOC, Op_VecA, 30, v30->as_VMReg() );
391 reg_def V30_H ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next() );
392 reg_def V30_J ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(2) );
393 reg_def V30_K ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(3) );
394
395 reg_def V31 ( SOC, SOC, Op_VecA, 31, v31->as_VMReg() );
396 reg_def V31_H ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next() );
397 reg_def V31_J ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(2) );
398 reg_def V31_K ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(3) );
399
400 // ----------------------------
401 // Special Registers
402 // ----------------------------
403
// On riscv there is no physical flag register, so we use t1 (x6) instead
// to bridge the RegFlags semantics in share/opto.
406
407 reg_def RFLAGS (SOC, SOC, Op_RegFlags, 6, x6->as_VMReg() );
408
409 // Specify priority of register selection within phases of register
410 // allocation. Highest priority is first. A useful heuristic is to
411 // give registers a low priority when they are required by machine
412 // instructions, like EAX and EDX on I486, and choose no-save registers
413 // before save-on-call, & save-on-call before save-on-entry. Registers
414 // which participate in fixed calling sequences should come last.
415 // Registers which are used as pairs must fall on an even boundary.
416
417 alloc_class chunk0(
418 // volatiles
419 R7, R7_H,
420 R28, R28_H,
421 R29, R29_H,
422 R30, R30_H,
423 R31, R31_H,
424
425 // arg registers
426 R10, R10_H,
427 R11, R11_H,
428 R12, R12_H,
429 R13, R13_H,
430 R14, R14_H,
431 R15, R15_H,
432 R16, R16_H,
433 R17, R17_H,
434
435 // non-volatiles
436 R9, R9_H,
437 R18, R18_H,
438 R19, R19_H,
439 R20, R20_H,
440 R21, R21_H,
441 R22, R22_H,
442 R24, R24_H,
443 R25, R25_H,
444 R26, R26_H,
445
446 // non-allocatable registers
447 R23, R23_H, // java thread
448 R27, R27_H, // heapbase
449 R4, R4_H, // thread
450 R8, R8_H, // fp
451 R0, R0_H, // zero
452 R1, R1_H, // ra
453 R2, R2_H, // sp
454 R3, R3_H, // gp
455 );
456
457 alloc_class chunk1(
458
459 // no save
460 F0, F0_H,
461 F1, F1_H,
462 F2, F2_H,
463 F3, F3_H,
464 F4, F4_H,
465 F5, F5_H,
466 F6, F6_H,
467 F7, F7_H,
468 F28, F28_H,
469 F29, F29_H,
470 F30, F30_H,
471 F31, F31_H,
472
473 // arg registers
474 F10, F10_H,
475 F11, F11_H,
476 F12, F12_H,
477 F13, F13_H,
478 F14, F14_H,
479 F15, F15_H,
480 F16, F16_H,
481 F17, F17_H,
482
483 // non-volatiles
484 F8, F8_H,
485 F9, F9_H,
486 F18, F18_H,
487 F19, F19_H,
488 F20, F20_H,
489 F21, F21_H,
490 F22, F22_H,
491 F23, F23_H,
492 F24, F24_H,
493 F25, F25_H,
494 F26, F26_H,
495 F27, F27_H,
496 );
497
498 alloc_class chunk2(
499 V0, V0_H, V0_J, V0_K,
500 V1, V1_H, V1_J, V1_K,
501 V2, V2_H, V2_J, V2_K,
502 V3, V3_H, V3_J, V3_K,
503 V4, V4_H, V4_J, V4_K,
504 V5, V5_H, V5_J, V5_K,
505 V6, V6_H, V6_J, V6_K,
506 V7, V7_H, V7_J, V7_K,
507 V8, V8_H, V8_J, V8_K,
508 V9, V9_H, V9_J, V9_K,
509 V10, V10_H, V10_J, V10_K,
510 V11, V11_H, V11_J, V11_K,
511 V12, V12_H, V12_J, V12_K,
512 V13, V13_H, V13_J, V13_K,
513 V14, V14_H, V14_J, V14_K,
514 V15, V15_H, V15_J, V15_K,
515 V16, V16_H, V16_J, V16_K,
516 V17, V17_H, V17_J, V17_K,
517 V18, V18_H, V18_J, V18_K,
518 V19, V19_H, V19_J, V19_K,
519 V20, V20_H, V20_J, V20_K,
520 V21, V21_H, V21_J, V21_K,
521 V22, V22_H, V22_J, V22_K,
522 V23, V23_H, V23_J, V23_K,
523 V24, V24_H, V24_J, V24_K,
524 V25, V25_H, V25_J, V25_K,
525 V26, V26_H, V26_J, V26_K,
526 V27, V27_H, V27_J, V27_K,
527 V28, V28_H, V28_J, V28_K,
528 V29, V29_H, V29_J, V29_K,
529 V30, V30_H, V30_J, V30_K,
530 V31, V31_H, V31_J, V31_K,
531 );
532
533 alloc_class chunk3(RFLAGS);
534
535 //----------Architecture Description Register Classes--------------------------
536 // Several register classes are automatically defined based upon information in
537 // this architecture description.
538 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
539 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
540 //
541
542 // Class for all 32 bit general purpose registers
543 reg_class all_reg32(
544 R0,
545 R1,
546 R2,
547 R3,
548 R4,
549 R7,
550 R8,
551 R9,
552 R10,
553 R11,
554 R12,
555 R13,
556 R14,
557 R15,
558 R16,
559 R17,
560 R18,
561 R19,
562 R20,
563 R21,
564 R22,
565 R23,
566 R24,
567 R25,
568 R26,
569 R27,
570 R28,
571 R29,
572 R30,
573 R31
574 );
575
576 // Class for any 32 bit integer registers (excluding zr)
577 reg_class any_reg32 %{
578 return _ANY_REG32_mask;
579 %}
580
581 // Singleton class for R10 int register
582 reg_class int_r10_reg(R10);
583
584 // Singleton class for R12 int register
585 reg_class int_r12_reg(R12);
586
587 // Singleton class for R13 int register
588 reg_class int_r13_reg(R13);
589
590 // Singleton class for R14 int register
591 reg_class int_r14_reg(R14);
592
593 // Class for all long integer registers
594 reg_class all_reg(
595 R0, R0_H,
596 R1, R1_H,
597 R2, R2_H,
598 R3, R3_H,
599 R4, R4_H,
600 R7, R7_H,
601 R8, R8_H,
602 R9, R9_H,
603 R10, R10_H,
604 R11, R11_H,
605 R12, R12_H,
606 R13, R13_H,
607 R14, R14_H,
608 R15, R15_H,
609 R16, R16_H,
610 R17, R17_H,
611 R18, R18_H,
612 R19, R19_H,
613 R20, R20_H,
614 R21, R21_H,
615 R22, R22_H,
616 R23, R23_H,
617 R24, R24_H,
618 R25, R25_H,
619 R26, R26_H,
620 R27, R27_H,
621 R28, R28_H,
622 R29, R29_H,
623 R30, R30_H,
624 R31, R31_H
625 );
626
627 // Class for all long integer registers (excluding zr)
628 reg_class any_reg %{
629 return _ANY_REG_mask;
630 %}
631
632 // Class for non-allocatable 32 bit registers
633 reg_class non_allocatable_reg32(
634 R0, // zr
635 R1, // ra
636 R2, // sp
637 R3, // gp
638 R4, // tp
639 R23 // java thread
640 );
641
642 // Class for non-allocatable 64 bit registers
643 reg_class non_allocatable_reg(
644 R0, R0_H, // zr
645 R1, R1_H, // ra
646 R2, R2_H, // sp
647 R3, R3_H, // gp
648 R4, R4_H, // tp
649 R23, R23_H // java thread
650 );
651
652 // Class for all non-special integer registers
653 reg_class no_special_reg32 %{
654 return _NO_SPECIAL_REG32_mask;
655 %}
656
657 // Class for all non-special long integer registers
658 reg_class no_special_reg %{
659 return _NO_SPECIAL_REG_mask;
660 %}
661
662 reg_class ptr_reg %{
663 return _PTR_REG_mask;
664 %}
665
666 // Class for all non_special pointer registers
667 reg_class no_special_ptr_reg %{
668 return _NO_SPECIAL_PTR_REG_mask;
669 %}
670
671 // Class for all non_special pointer registers (excluding fp)
672 reg_class no_special_no_fp_ptr_reg %{
673 return _NO_SPECIAL_NO_FP_PTR_REG_mask;
674 %}
675
676 // Class for 64 bit register r10
677 reg_class r10_reg(
678 R10, R10_H
679 );
680
681 // Class for 64 bit register r11
682 reg_class r11_reg(
683 R11, R11_H
684 );
685
686 // Class for 64 bit register r12
687 reg_class r12_reg(
688 R12, R12_H
689 );
690
691 // Class for 64 bit register r13
692 reg_class r13_reg(
693 R13, R13_H
694 );
695
696 // Class for 64 bit register r14
697 reg_class r14_reg(
698 R14, R14_H
699 );
700
701 // Class for 64 bit register r15
702 reg_class r15_reg(
703 R15, R15_H
704 );
705
706 // Class for 64 bit register r16
707 reg_class r16_reg(
708 R16, R16_H
709 );
710
711 // Class for method register
712 reg_class method_reg(
713 R31, R31_H
714 );
715
716 // Class for java thread register
717 reg_class java_thread_reg(
718 R23, R23_H
719 );
720
// Class for 64 bit register r28
reg_class r28_reg(
  R28, R28_H
);
724
// Class for 64 bit register r29
reg_class r29_reg(
  R29, R29_H
);
728
// Class for 64 bit register r30
reg_class r30_reg(
  R30, R30_H
);
732
// Class for 64 bit register r31
reg_class r31_reg(
  R31, R31_H
);
736
// Class for the zero register
738 reg_class zr_reg(
739 R0, R0_H
740 );
741
742 // Class for thread register
743 reg_class thread_reg(
744 R4, R4_H
745 );
746
747 // Class for frame pointer register
748 reg_class fp_reg(
749 R8, R8_H
750 );
751
752 // Class for link register
753 reg_class ra_reg(
754 R1, R1_H
755 );
756
757 // Class for long sp register
758 reg_class sp_reg(
759 R2, R2_H
760 );
761
762 // Class for all float registers
763 reg_class float_reg(
764 F0,
765 F1,
766 F2,
767 F3,
768 F4,
769 F5,
770 F6,
771 F7,
772 F8,
773 F9,
774 F10,
775 F11,
776 F12,
777 F13,
778 F14,
779 F15,
780 F16,
781 F17,
782 F18,
783 F19,
784 F20,
785 F21,
786 F22,
787 F23,
788 F24,
789 F25,
790 F26,
791 F27,
792 F28,
793 F29,
794 F30,
795 F31
796 );
797
798 // Double precision float registers have virtual `high halves' that
799 // are needed by the allocator.
800 // Class for all double registers
801 reg_class double_reg(
802 F0, F0_H,
803 F1, F1_H,
804 F2, F2_H,
805 F3, F3_H,
806 F4, F4_H,
807 F5, F5_H,
808 F6, F6_H,
809 F7, F7_H,
810 F8, F8_H,
811 F9, F9_H,
812 F10, F10_H,
813 F11, F11_H,
814 F12, F12_H,
815 F13, F13_H,
816 F14, F14_H,
817 F15, F15_H,
818 F16, F16_H,
819 F17, F17_H,
820 F18, F18_H,
821 F19, F19_H,
822 F20, F20_H,
823 F21, F21_H,
824 F22, F22_H,
825 F23, F23_H,
826 F24, F24_H,
827 F25, F25_H,
828 F26, F26_H,
829 F27, F27_H,
830 F28, F28_H,
831 F29, F29_H,
832 F30, F30_H,
833 F31, F31_H
834 );
835
836 // Class for RVV vector registers
837 // Note: v0, v30 and v31 are used as mask registers.
838 reg_class vectora_reg(
839 V1, V1_H, V1_J, V1_K,
840 V2, V2_H, V2_J, V2_K,
841 V3, V3_H, V3_J, V3_K,
842 V4, V4_H, V4_J, V4_K,
843 V5, V5_H, V5_J, V5_K,
844 V6, V6_H, V6_J, V6_K,
845 V7, V7_H, V7_J, V7_K,
846 V8, V8_H, V8_J, V8_K,
847 V9, V9_H, V9_J, V9_K,
848 V10, V10_H, V10_J, V10_K,
849 V11, V11_H, V11_J, V11_K,
850 V12, V12_H, V12_J, V12_K,
851 V13, V13_H, V13_J, V13_K,
852 V14, V14_H, V14_J, V14_K,
853 V15, V15_H, V15_J, V15_K,
854 V16, V16_H, V16_J, V16_K,
855 V17, V17_H, V17_J, V17_K,
856 V18, V18_H, V18_J, V18_K,
857 V19, V19_H, V19_J, V19_K,
858 V20, V20_H, V20_J, V20_K,
859 V21, V21_H, V21_J, V21_K,
860 V22, V22_H, V22_J, V22_K,
861 V23, V23_H, V23_J, V23_K,
862 V24, V24_H, V24_J, V24_K,
863 V25, V25_H, V25_J, V25_K,
864 V26, V26_H, V26_J, V26_K,
865 V27, V27_H, V27_J, V27_K,
866 V28, V28_H, V28_J, V28_K,
867 V29, V29_H, V29_J, V29_K
868 );
869
870 // Class for 64 bit register f0
871 reg_class f0_reg(
872 F0, F0_H
873 );
874
875 // Class for 64 bit register f1
876 reg_class f1_reg(
877 F1, F1_H
878 );
879
880 // Class for 64 bit register f2
881 reg_class f2_reg(
882 F2, F2_H
883 );
884
885 // Class for 64 bit register f3
886 reg_class f3_reg(
887 F3, F3_H
888 );
889
// Single-register classes for vector registers v1..v11, used where an
// instruction encoding requires one specific vector register.

// Class for vector register v1
reg_class v1_reg(
  V1, V1_H, V1_J, V1_K
);

// Class for vector register v2
reg_class v2_reg(
  V2, V2_H, V2_J, V2_K
);

// Class for vector register v3
reg_class v3_reg(
  V3, V3_H, V3_J, V3_K
);

// Class for vector register v4
reg_class v4_reg(
  V4, V4_H, V4_J, V4_K
);

// Class for vector register v5
reg_class v5_reg(
  V5, V5_H, V5_J, V5_K
);

// Class for vector register v6
reg_class v6_reg(
  V6, V6_H, V6_J, V6_K
);

// Class for vector register v7
reg_class v7_reg(
  V7, V7_H, V7_J, V7_K
);

// Class for vector register v8
reg_class v8_reg(
  V8, V8_H, V8_J, V8_K
);

// Class for vector register v9
reg_class v9_reg(
  V9, V9_H, V9_J, V9_K
);

// Class for vector register v10
reg_class v10_reg(
  V10, V10_H, V10_J, V10_K
);

// Class for vector register v11
reg_class v11_reg(
  V11, V11_H, V11_J, V11_K
);
944
// Class for condition codes (a single synthetic flags register).
reg_class reg_flags(RFLAGS);

// Class for RVV v0 mask register
// https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#53-vector-masking
// The mask value used to control execution of a masked vector
// instruction is always supplied by vector register v0.
reg_class vmask_reg_v0 (
  V0
);

// Class for RVV mask registers
// We need two more vmask registers to do the vector mask logical ops,
// so define v30, v31 as mask register too.
reg_class vmask_reg (
  V0,
  V30,
  V31
);
964 %}
965
966 //----------DEFINITION BLOCK---------------------------------------------------
967 // Define name --> value mappings to inform the ADLC of an integer valued name
968 // Current support includes integer values in the range [0, 0x7FFFFFFF]
969 // Format:
970 // int_def <name> ( <int_value>, <expression>);
971 // Generated Code in ad_<arch>.hpp
972 // #define <name> (<expression>)
973 // // value == <int_value>
974 // Generated code in ad_<arch>.cpp adlc_verification()
975 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
976 //
977
978 // we follow the ppc-aix port in using a simple cost model which ranks
979 // register operations as cheap, memory ops as more expensive and
980 // branches as most expensive. the first two have a low as well as a
981 // normal cost. huge cost appears to be a way of saying don't do
982 // something
983
definitions %{
  // The default cost (of a register move instruction).
  int_def DEFAULT_COST ( 100, 100);
  int_def ALU_COST ( 100, 1 * DEFAULT_COST); // unknown, const, arith, shift, slt,
                                             // multi, auipc, nop, logical, move
  int_def LOAD_COST ( 300, 3 * DEFAULT_COST); // load, fpload
  int_def STORE_COST ( 100, 1 * DEFAULT_COST); // store, fpstore
  int_def XFER_COST ( 300, 3 * DEFAULT_COST); // mfc, mtc, fcvt, fmove, fcmp
  int_def FMVX_COST ( 100, 1 * DEFAULT_COST); // shuffles with no conversion
  int_def BRANCH_COST ( 200, 2 * DEFAULT_COST); // branch, jmp, call
  int_def IMUL_COST ( 1000, 10 * DEFAULT_COST); // imul
  int_def IDIVSI_COST ( 3400, 34 * DEFAULT_COST); // idivsi
  int_def IDIVDI_COST ( 6600, 66 * DEFAULT_COST); // idivdi
  int_def FMUL_SINGLE_COST ( 500, 5 * DEFAULT_COST); // fmul, fmadd
  int_def FMUL_DOUBLE_COST ( 700, 7 * DEFAULT_COST); // fmul, fmadd
  int_def FDIV_COST ( 2000, 20 * DEFAULT_COST); // fdiv
  int_def FSQRT_COST ( 2500, 25 * DEFAULT_COST); // fsqrt
  int_def VOLATILE_REF_COST ( 1000, 10 * DEFAULT_COST);
  int_def CACHE_MISS_COST ( 2000, 20 * DEFAULT_COST); // typical cache miss penalty
%}
1004
1005
1006
1007 //----------SOURCE BLOCK-------------------------------------------------------
1008 // This is a block of C++ code which provides values, functions, and
1009 // definitions necessary in the rest of the architecture description
1010
1011 source_hpp %{
1012
1013 #include "asm/macroAssembler.hpp"
1014 #include "gc/shared/barrierSetAssembler.hpp"
1015 #include "gc/shared/cardTable.hpp"
1016 #include "gc/shared/cardTableBarrierSet.hpp"
1017 #include "gc/shared/collectedHeap.hpp"
1018 #include "opto/addnode.hpp"
1019 #include "opto/convertnode.hpp"
1020 #include "runtime/objectMonitor.hpp"
1021
1022 extern RegMask _ANY_REG32_mask;
1023 extern RegMask _ANY_REG_mask;
1024 extern RegMask _PTR_REG_mask;
1025 extern RegMask _NO_SPECIAL_REG32_mask;
1026 extern RegMask _NO_SPECIAL_REG_mask;
1027 extern RegMask _NO_SPECIAL_PTR_REG_mask;
1028 extern RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
1029
// Platform hooks consulted by Compile::shorten_branches.
// RISC-V does not use call trampoline stubs, so both queries return 0.
class CallStubImpl {

  //--------------------------------------------------------------
  //---< Used for optimization in Compile::shorten_branches >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // Number of relocations needed by a call trampoline stub.
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
1047
// Emission and sizing of the per-nmethod exception and deopt handler
// stubs. The emit_* implementations live in the source %{ %} block.
class HandlerImpl {

 public:

  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  // The exception handler is a single far branch to the exception blob.
  static uint size_exception_handler() {
    return MacroAssembler::far_branch_size();
  }

  static uint size_deopt_handler() {
    // count auipc + far branch
    return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
  }
};
1064
// Platform-dependent extension point for machine-specific node flags.
// RISC-V defines no extra flags; the enum only re-exports the generic
// last-flag marker.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
1071
// Returns true if 'opcode' is one of the CompareAndSwapX / GetAndX
// family; weak/exchange variants count only when 'maybe_volatile' is set.
bool is_CAS(int opcode, bool maybe_volatile);

// predicate controlling translation of CompareAndSwapX
bool needs_acquiring_load_reserved(const Node *load);

// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
1079 %}
1080
1081 source %{
1082
1083 // Derived RegMask with conditionally allocatable registers
1084
1085 RegMask _ANY_REG32_mask;
1086 RegMask _ANY_REG_mask;
1087 RegMask _PTR_REG_mask;
1088 RegMask _NO_SPECIAL_REG32_mask;
1089 RegMask _NO_SPECIAL_REG_mask;
1090 RegMask _NO_SPECIAL_PTR_REG_mask;
1091 RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
1092
// Populate the derived register masks from the ADLC-generated base masks,
// removing registers that must not be allocated in the current VM
// configuration.
void reg_mask_init() {

  // x0 (zr) is hard-wired to zero in the RISC-V ISA and never allocatable.
  _ANY_REG32_mask = _ALL_REG32_mask;
  _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(x0->as_VMReg()));

  _ANY_REG_mask = _ALL_REG_mask;
  _ANY_REG_mask.SUBTRACT(_ZR_REG_mask);

  _PTR_REG_mask = _ALL_REG_mask;
  _PTR_REG_mask.SUBTRACT(_ZR_REG_mask);

  _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
  _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

  _NO_SPECIAL_REG_mask = _ALL_REG_mask;
  _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

  _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
  _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

  // x27 is not allocatable when compressed oops is on
  if (UseCompressedOops) {
    _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
    _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
    _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
  }

  // x8 is not allocatable when PreserveFramePointer is on
  if (PreserveFramePointer) {
    _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
    _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
    _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
  }

  // Variant of _NO_SPECIAL_PTR_REG_mask that always excludes x8 (fp),
  // regardless of PreserveFramePointer.
  _NO_SPECIAL_NO_FP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
  _NO_SPECIAL_NO_FP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
}
1130
// No platform-specific mach-node analysis is needed on RISC-V.
void PhaseOutput::pd_perform_mach_node_analysis() {
}

// Default instruction alignment: no extra alignment required.
int MachNode::pd_alignment_required() const {
  return 1;
}

// No padding needed before generic mach nodes on this platform.
int MachNode::compute_padding(int current_offset) const {
  return 0;
}
1141
1142 // is_CAS(int opcode, bool maybe_volatile)
1143 //
1144 // return true if opcode is one of the possible CompareAndSwapX
1145 // values otherwise false.
1146 bool is_CAS(int opcode, bool maybe_volatile)
1147 {
1148 switch (opcode) {
1149 // We handle these
1150 case Op_CompareAndSwapI:
1151 case Op_CompareAndSwapL:
1152 case Op_CompareAndSwapP:
1153 case Op_CompareAndSwapN:
1154 case Op_ShenandoahCompareAndSwapP:
1155 case Op_ShenandoahCompareAndSwapN:
1156 case Op_CompareAndSwapB:
1157 case Op_CompareAndSwapS:
1158 case Op_GetAndSetI:
1159 case Op_GetAndSetL:
1160 case Op_GetAndSetP:
1161 case Op_GetAndSetN:
1162 case Op_GetAndAddI:
1163 case Op_GetAndAddL:
1164 return true;
1165 case Op_CompareAndExchangeI:
1166 case Op_CompareAndExchangeN:
1167 case Op_CompareAndExchangeB:
1168 case Op_CompareAndExchangeS:
1169 case Op_CompareAndExchangeL:
1170 case Op_CompareAndExchangeP:
1171 case Op_WeakCompareAndSwapB:
1172 case Op_WeakCompareAndSwapS:
1173 case Op_WeakCompareAndSwapI:
1174 case Op_WeakCompareAndSwapL:
1175 case Op_WeakCompareAndSwapP:
1176 case Op_WeakCompareAndSwapN:
1177 case Op_ShenandoahWeakCompareAndSwapP:
1178 case Op_ShenandoahWeakCompareAndSwapN:
1179 case Op_ShenandoahCompareAndExchangeP:
1180 case Op_ShenandoahCompareAndExchangeN:
1181 return maybe_volatile;
1182 default:
1183 return false;
1184 }
1185 }
1186
1187 // predicate controlling translation of CAS
1188 //
1189 // returns true if CAS needs to use an acquiring load otherwise false
1190 bool needs_acquiring_load_reserved(const Node *n)
1191 {
1192 assert(n != nullptr && is_CAS(n->Opcode(), true), "expecting a compare and swap");
1193
1194 LoadStoreNode* ldst = n->as_LoadStore();
1195 if (n != nullptr && is_CAS(n->Opcode(), false)) {
1196 assert(ldst != nullptr && ldst->trailing_membar() != nullptr, "expected trailing membar");
1197 } else {
1198 return ldst != nullptr && ldst->trailing_membar() != nullptr;
1199 }
1200 // so we can just return true here
1201 return true;
1202 }
1203 #define __ masm->
1204
1205 // advance declarations for helper functions to convert register
1206 // indices to register objects
1207
1208 // the ad file has to provide implementations of certain methods
1209 // expected by the generic code
1210 //
1211 // REQUIRED FUNCTIONALITY
1212
1213 //=============================================================================
1214
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.

// Static Java call: far call sequence of auipc + ld + jalr.
int MachCallStaticJavaNode::ret_addr_offset()
{
  return 3 * NativeInstruction::instruction_size; // auipc + ld + jalr
}

// Dynamic Java call: inline-cache load (movptr2) followed by the far
// call sequence.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return NativeMovConstReg::movptr2_instruction_size + (3 * NativeInstruction::instruction_size); // movptr2, auipc + ld + jal
}

int MachCallRuntimeNode::ret_addr_offset() {
  // For address inside the code cache the call will be:
  //   auipc + jalr
  // For real runtime callouts it will be 8 instructions
  // see riscv_enc_java_to_runtime
  //   la(t0, retaddr)                                          -> auipc + addi
  //   sd(t0, Address(xthread, JavaThread::last_Java_pc_offset())) -> sd
  //   movptr(t1, addr, offset, t0)                             -> lui + lui + slli + add
  //   jalr(t1, offset)                                         -> jalr
  if (CodeCache::contains(_entry_point)) {
    return 2 * NativeInstruction::instruction_size;
  } else {
    return 8 * NativeInstruction::instruction_size;
  }
}
1244
1245 //
1246 // Compute padding required for nodes which need alignment
1247 //
1248
1249 // With RVC a call instruction may get 2-byte aligned.
1250 // The address of the call instruction needs to be 4-byte aligned to
1251 // ensure that it does not span a cache line so that it can be patched.
1252 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
1253 {
1254 // to make sure the address of jal 4-byte aligned.
1255 return align_up(current_offset, alignment_required()) - current_offset;
1256 }
1257
1258 // With RVC a call instruction may get 2-byte aligned.
1259 // The address of the call instruction needs to be 4-byte aligned to
1260 // ensure that it does not span a cache line so that it can be patched.
1261 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
1262 {
1263 // skip the movptr2 in MacroAssembler::ic_call():
1264 // lui, lui, slli, add, addi
1265 // Though movptr2() has already 4-byte aligned with or without RVC,
1266 // We need to prevent from further changes by explicitly calculating the size.
1267 current_offset += NativeMovConstReg::movptr2_instruction_size;
1268 // to make sure the address of jal 4-byte aligned.
1269 return align_up(current_offset, alignment_required()) - current_offset;
1270 }
1271
1272 //=============================================================================
1273
#ifndef PRODUCT
// Debug printing for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(st != nullptr);
  st->print("BREAKPOINT");
}
#endif

// Emit a breakpoint as the RISC-V ebreak instruction.
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ ebreak();
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1288
1289 //=============================================================================
1290
#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
  st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif

// Emit '_count' nops; under RVC each nop is compressed to 2 bytes so the
// size() accounting below stays consistent.
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
  Assembler::CompressibleScope scope(masm); // nops shall be 2-byte under RVC for alignment purposes.
  for (int i = 0; i < _count; i++) {
    __ nop();
  }
}

uint MachNopNode::size(PhaseRegAlloc*) const {
  return _count * (UseRVC ? NativeInstruction::compressed_instruction_size : NativeInstruction::instruction_size);
}
1307
1308 //=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

// Constant table entries are addressed absolutely on RISC-V, so the
// table base carries no offset.
int ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// The constant base node emits no code on this platform.
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  assert_cond(st != nullptr);
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
1334
#ifndef PRODUCT
// Pseudo-assembly listing of the method prolog: stack bang, frame push,
// and (for non-stub compilations) the nmethod entry barrier.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(st != nullptr && ra_ != nullptr);
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize)) {
    st->print("# stack bang size=%d\n\t", framesize);
  }

  st->print("sd fp, [sp, #%d]\n\t", - 2 * wordSize);
  st->print("sd ra, [sp, #%d]\n\t", - wordSize);
  if (PreserveFramePointer) { st->print("sub fp, sp, #%d\n\t", 2 * wordSize); }
  st->print("sub sp, sp, #%d\n\t", framesize);

  // Entry barrier sequence is only present for normal method compiles.
  if (C->stub_function() == nullptr) {
    st->print("ld t0, [guard]\n\t");
    st->print("membar LoadLoad\n\t");
    st->print("ld t1, [xthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("beq t0, t1, skip\n\t");
    st->print("jalr #nmethod_entry_barrier_stub\n\t");
    st->print("j skip\n\t");
    st->print("guard: int\n\t");
    st->print("skip:\n\t");
  }
}
#endif
1363
1364 void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
1365 assert_cond(ra_ != nullptr);
1366 Compile* C = ra_->C;
1367
1368 // n.b. frame size includes space for return pc and fp
1369 const int framesize = C->output()->frame_size_in_bytes();
1370
1371 assert_cond(C != nullptr);
1372
1373 if (C->clinit_barrier_on_entry()) {
1374 assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
1375
1376 Label L_skip_barrier;
1377
1378 __ mov_metadata(t1, C->method()->holder()->constant_encoding());
1379 __ clinit_barrier(t1, t0, &L_skip_barrier);
1380 __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1381 __ bind(L_skip_barrier);
1382 }
1383
1384 int bangsize = C->output()->bang_size_in_bytes();
1385 if (C->output()->need_stack_bang(bangsize)) {
1386 __ generate_stack_overflow_check(bangsize);
1387 }
1388
1389 __ build_frame(framesize);
1390
1391 if (C->stub_function() == nullptr) {
1392 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
1393 // Dummy labels for just measuring the code size
1394 Label dummy_slow_path;
1395 Label dummy_continuation;
1396 Label dummy_guard;
1397 Label* slow_path = &dummy_slow_path;
1398 Label* continuation = &dummy_continuation;
1399 Label* guard = &dummy_guard;
1400 if (!Compile::current()->output()->in_scratch_emit_size()) {
1401 // Use real labels from actual stub when not emitting code for purpose of measuring its size
1402 C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
1403 Compile::current()->output()->add_stub(stub);
1404 slow_path = &stub->entry();
1405 continuation = &stub->continuation();
1406 guard = &stub->guard();
1407 }
1408 // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
1409 bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
1410 }
1411
1412 if (VerifyStackAtCalls) {
1413 Unimplemented();
1414 }
1415
1416 C->output()->set_frame_complete(__ offset());
1417
1418 if (C->has_mach_constant_base_node()) {
1419 // NOTE: We set the table base offset here because users might be
1420 // emitted before MachConstantBaseNode.
1421 ConstantTable& constant_table = C->output()->constant_table();
1422 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1423 }
1424 }
1425
// Prolog size is variable (barriers, bang, RVC); measure it.
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  assert_cond(ra_ != nullptr);
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  return 0;
}
1437
1438 //=============================================================================
1439
#ifndef PRODUCT
// Pseudo-assembly listing of the method epilog: frame pop and the
// return-polling check for method compilations.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(st != nullptr && ra_ != nullptr);
  Compile* C = ra_->C;
  assert_cond(C != nullptr);
  int framesize = C->output()->frame_size_in_bytes();

  st->print("# pop frame %d\n\t", framesize);

  if (framesize == 0) {
    st->print("ld ra, [sp,#%d]\n\t", (2 * wordSize));
    st->print("ld fp, [sp,#%d]\n\t", (3 * wordSize));
    st->print("add sp, sp, #%d\n\t", (2 * wordSize));
  } else {
    st->print("add sp, sp, #%d\n\t", framesize);
    st->print("ld ra, [sp,#%d]\n\t", - 2 * wordSize);
    st->print("ld fp, [sp,#%d]\n\t", - wordSize);
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ld t0, [xthread,#%d]\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("bgtu sp, t0, #slow_path");
  }
}
#endif
1466
// Emit the method epilog: tear down the frame, optionally check the
// reserved stack, and emit the return safepoint poll.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  assert_cond(ra_ != nullptr);
  Compile* C = ra_->C;
  assert_cond(C != nullptr);
  int framesize = C->output()->frame_size_in_bytes();

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      // Use the real slow-path stub only when emitting final code.
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
  }
}
1491
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  assert_cond(ra_ != nullptr);
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1505
1506 //=============================================================================
1507
// Figure out which register class each belongs in: rc_int, rc_float,
// rc_vector or rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_vector, rc_stack };
1511
1512 static enum RC rc_class(OptoReg::Name reg) {
1513
1514 if (reg == OptoReg::Bad) {
1515 return rc_bad;
1516 }
1517
1518 // we have 30 int registers * 2 halves
1519 // (t0 and t1 are omitted)
1520 int slots_of_int_registers = Register::max_slots_per_register * (Register::number_of_registers - 2);
1521 if (reg < slots_of_int_registers) {
1522 return rc_int;
1523 }
1524
1525 // we have 32 float register * 2 halves
1526 int slots_of_float_registers = FloatRegister::max_slots_per_register * FloatRegister::number_of_registers;
1527 if (reg < slots_of_int_registers + slots_of_float_registers) {
1528 return rc_float;
1529 }
1530
1531 // we have 32 vector register * 4 halves
1532 int slots_of_vector_registers = VectorRegister::max_slots_per_register * VectorRegister::number_of_registers;
1533 if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) {
1534 return rc_vector;
1535 }
1536
1537 // Between vector regs & stack is the flags regs.
1538 assert(OptoReg::is_stack(reg), "blow up if spilling flags");
1539
1540 return rc_stack;
1541 }
1542
// Emit (masm != nullptr), format (st != nullptr), or just analyze a spill
// copy between the locations assigned by the register allocator. Handles
// gpr/fpr/stack moves plus the scalable-vector and vector-mask cases.
// Always returns 0; the node's size is computed via MachNode::size().
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  assert_cond(ra_ != nullptr);
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo & 1) == 0 && src_lo + 1 == src_hi &&
           (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  // 64-bit move iff both source and destination are aligned-adjacent pairs.
  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  if (bottom_type()->isa_vect() != nullptr) {
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack to stack
        __ spill_copy_vector_stack_to_stack(src_offset, dst_offset,
                                            vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
        // vpr to stack
        __ spill(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
        // stack to vpr
        __ unspill(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
        // vpr to vpr
        __ vsetvli_helper(T_BYTE, MaxVectorSize);
        __ vmv_v_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (bottom_type()->isa_vectmask() && masm) {
      // predicate reg slots are 32-bit; * 32 / 8 converts bits to bytes
      int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8;
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack to stack
        __ spill_copy_vmask_stack_to_stack(src_offset, dst_offset,
                                           vmask_size_in_bytes);
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
        // vmask to stack
        __ spill_vmask(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
        // stack to vmask
        __ unspill_vmask(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
      } else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
        // vmask to vmask
        __ vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
        __ vmv_v_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm != nullptr) {
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (!is64 && this->ideal_reg() != Op_RegI) { // zero extended for narrow oop or klass
          __ zext(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]), 32);
        } else {
          __ mv(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
          __ fmv_d_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
          __ fmv_w_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
          __ fmv_x_d(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
          __ fmv_x_w(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
          __ fmv_d(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
          __ fmv_s(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        if (this->ideal_reg() == Op_RegI) {
          __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
        } else { // zero extended for narrow oop or klass
          __ unspillu(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
        }
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64, src_offset);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        // bounce through t0
        if (this->ideal_reg() == Op_RegI) {
          __ unspill(t0, is64, src_offset);
        } else { // zero extended for narrow oop or klass
          __ unspillu(t0, is64, src_offset);
        }
        __ spill(t0, is64, dst_offset);
      }
      break;
    default:
      ShouldNotReachHere();
    }
  }

  if (st != nullptr) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", src_offset);
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", dst_offset);
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      if (ideal_reg() == Op_VecA) {
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
      } else {
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# vmask spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;
}
1717
#ifndef PRODUCT
// Formatting delegates to implementation() with masm == nullptr so the
// same dispatch logic produces the listing.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (ra_ == nullptr) {
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  } else {
    implementation(nullptr, ra_, false, st);
  }
}
#endif

void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1735
1736 //=============================================================================
1737
#ifndef PRODUCT
// BoxLockNode materializes the address of an on-stack lock slot.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  assert_cond(ra_ != nullptr && st != nullptr);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("add %s, sp, #%d\t# box lock",
            Matcher::regName[reg], offset);
}
#endif

void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Assembler::IncompressibleScope scope(masm); // Fixed length: see BoxLockNode::size()

  assert_cond(ra_ != nullptr);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm12(offset)) {
    // Offset fits a 12-bit immediate: single addi.
    __ addi(as_Register(reg), sp, offset);
  } else {
    // Materialize the offset in t0 first.
    __ li32(t0, offset);
    __ add(as_Register(reg), sp, t0);
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  if (Assembler::is_simm12(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 3 * NativeInstruction::instruction_size; // lui + addiw + add;
  }
}
1773
1774 //=============================================================================
1775
1776 #ifndef PRODUCT
1777 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
1778 {
1779 assert_cond(st != nullptr);
1780 st->print_cr("# MachUEPNode");
1781 if (UseCompressedClassPointers) {
1782 st->print_cr("\tlwu t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1783 st->print_cr("\tlwu t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
1784 } else {
1785 st->print_cr("\tld t1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
1786 st->print_cr("\tld t2, [t0 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
1787 }
1788 st->print_cr("\tbeq t1, t2, ic_hit");
1789 st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
1790 st->print_cr("\tic_hit:");
1791 }
1792 #endif
1793
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  // This is the unverified entry point.
  // ic_check() compares the receiver's klass against the inline cache and
  // branches to SharedRuntime::_ic_miss_stub on mismatch; on a hit it falls
  // through to the verified entry point that follows this node.
  __ ic_check(CodeEntryAlignment);

  // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
  assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
}
1802
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // The UEP is not fixed-length (ic_check() pads to CodeEntryAlignment),
  // so the size is measured from the emitted code by the generic helper.
  assert_cond(ra_ != nullptr);
  return MachNode::size(ra_);
}
1808
1809 // REQUIRED EMIT CODE
1810
1811 //=============================================================================
1812
1813 // Emit exception handler code.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
  // Emits the out-of-line exception handler stub: a far jump to the shared
  // exception blob. Returns the offset of the handler within the stub
  // section, or 0 if the code cache is exhausted.
  //
  // auipc t1, #exception_blob_entry_point
  // jr (offset)t1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_exception_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  // The fixed-size budget from size_exception_handler() must not be exceeded.
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
1831
1832 // Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  // Emits the out-of-line deoptimization handler stub. Returns the offset
  // of the handler within the stub section, or 0 on code-cache exhaustion.
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // auipc(ra, 0) puts the address of this handler into ra before jumping
  // to the deopt blob's unpack entry.
  __ auipc(ra, 0);
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;

}
1850 // REQUIRED MATCHER CODE
1851
1852 //=============================================================================
1853
bool Matcher::match_rule_supported(int opcode) {
  // Gate match rules on the CPU features / VM flags they require.
  // Returning false makes C2 fall back to a non-intrinsic implementation.
  if (!has_match_rule(opcode)) {
    return false;
  }

  switch (opcode) {
    case Op_OnSpinWait:
      return VM_Version::supports_on_spin_wait();
    case Op_CacheWB: // fall through
    case Op_CacheWBPreSync: // fall through
    case Op_CacheWBPostSync:
      if (!VM_Version::supports_data_cache_line_flush()) {
        return false;
      }
      break;

    // String/array intrinsics implemented with the RVV vector extension.
    case Op_StrCompressedCopy: // fall through
    case Op_StrInflatedCopy: // fall through
    case Op_CountPositives: // fall through
    case Op_EncodeISOArray:
      return UseRVV;

    case Op_PopCountI:
    case Op_PopCountL:
      return UsePopCountInstruction;

    // Bit-reversal needs the Zbkb scalar crypto bitmanip extension.
    case Op_ReverseI:
    case Op_ReverseL:
      return UseZbkb;

    // Byte-swap, rotate and count-zero ops need the Zbb bitmanip extension.
    case Op_ReverseBytesI:
    case Op_ReverseBytesL:
    case Op_ReverseBytesS:
    case Op_ReverseBytesUS:
    case Op_RotateRight:
    case Op_RotateLeft:
    case Op_CountLeadingZerosI:
    case Op_CountLeadingZerosL:
    case Op_CountTrailingZerosI:
    case Op_CountTrailingZerosL:
      return UseZbb;

    case Op_FmaF:
    case Op_FmaD:
      return UseFMA;

    // Half-precision float support (Zfh/Zfhmin extensions).
    case Op_ConvHF2F:
    case Op_ConvF2HF:
      return VM_Version::supports_float16_float_conversion();
    case Op_ReinterpretS2HF:
    case Op_ReinterpretHF2S:
      return UseZfh || UseZfhmin;
    case Op_AddHF:
    case Op_DivHF:
    case Op_FmaHF:
    case Op_MaxHF:
    case Op_MinHF:
    case Op_MulHF:
    case Op_SqrtHF:
    case Op_SubHF:
      return UseZfh;

    // No conditional-move rules on this platform.
    case Op_CMoveF:
    case Op_CMoveD:
    case Op_CMoveP:
    case Op_CMoveN:
      return false;
  }

  return true; // Per default match rules are supported.
}
1925
const RegMask* Matcher::predicate_reg_mask(void) {
  // Vector predicate (mask) values live in the VMASK register class.
  return &_VMASK_REG_mask;
}
1929
// Vector calling convention follows EnableVectorSupport; see vector_return_value() below.
bool Matcher::supports_vector_calling_convention(void) {
  // Vector values are passed/returned in vector registers only when the
  // Vector API support is enabled.
  return EnableVectorSupport;
}
1934
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  // Vector return values come back in v8, per the RISC-V vector calling
  // convention.
  assert(EnableVectorSupport, "sanity");
  assert(ideal_reg == Op_VecA, "sanity");
  // check more info at https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc
  int lo = V8_num;
  int hi = V8_K_num;
  return OptoRegPair(hi, lo);
}
1943
1944 // Is this branch offset short enough that a short branch can be used?
1945 //
1946 // NOTE: If the platform does not provide any short branch variants, then
1947 // this method should return false for offset 0.
1948 // |---label(L1)-----|
1949 // |-----------------|
1950 // |-----------------|----------eq: float-------------------
1951 // |-----------------| // far_cmpD_branch | cmpD_branch
1952 // |------- ---------| feq; | feq;
1953 // |-far_cmpD_branch-| beqz done; | bnez L;
1954 // |-----------------| j L; |
1955 // |-----------------| bind(done); |
1956 // |-----------------|--------------------------------------
1957 // |-----------------| // so shortBrSize = br_size - 4;
1958 // |-----------------| // so offs = offset - shortBrSize + 4;
1959 // |---label(L2)-----|
1960 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1961 // The passed offset is relative to address of the branch.
1962 int shortBrSize = br_size - 4;
1963 int offs = offset - shortBrSize + 4;
1964 return (-4096 <= offs && offs < 4096);
1965 }
1966
1967 // Vector width in bytes.
1968 int Matcher::vector_width_in_bytes(BasicType bt) {
1969 if (UseRVV) {
1970 // The MaxVectorSize should have been set by detecting RVV max vector register size when check UseRVV.
1971 // MaxVectorSize == VM_Version::_initial_vector_length
1972 int size = MaxVectorSize;
1973 // Minimum 2 values in vector
1974 if (size < 2 * type2aelembytes(bt)) size = 0;
1975 // But never < 4
1976 if (size < 4) size = 0;
1977 return size;
1978 }
1979 return 0;
1980 }
1981
1982 // Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  // Maximum number of elements of type bt that fit in a vector register.
  return vector_width_in_bytes(bt) / type2aelembytes(bt);
}
1986
int Matcher::min_vector_size(const BasicType bt) {
  // Minimum number of elements (of type bt) the vectorizer may use,
  // clamped so it never exceeds max_vector_size(bt).
  int size;
  switch(bt) {
    case T_BOOLEAN:
      // Load/store a vector mask with only 2 elements for vector types
      // such as "2I/2F/2L/2D".
      size = 2;
      break;
    case T_BYTE:
      // Generate a "4B" vector, to support vector cast between "8B/16B"
      // and "4S/4I/4L/4F/4D".
      size = 4;
      break;
    case T_SHORT:
      // Generate a "2S" vector, to support vector cast between "4S/8S"
      // and "2I/2L/2F/2D".
      size = 2;
      break;
    default:
      // Limit the min vector length to 64-bit.
      size = 8 / type2aelembytes(bt);
      // The number of elements in a vector should be at least 2.
      size = MAX2(size, 2);
  }

  int max_size = max_vector_size(bt);
  return MIN2(size, max_size);
}
2015
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  // Auto-vectorization uses the same cap as explicit vector API code.
  return Matcher::max_vector_size(bt);
}
2019
2020 // Vector ideal reg.
2021 uint Matcher::vector_ideal_reg(int len) {
2022 assert(MaxVectorSize >= len, "");
2023 if (UseRVV) {
2024 return Op_VecA;
2025 }
2026
2027 ShouldNotReachHere();
2028 return 0;
2029 }
2030
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  // For scalable (RVV) vectors the register size equals the max vector size.
  return Matcher::max_vector_size(bt);
}
2034
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  // This platform does not use generic vector operands, so specialization
  // must never be requested.
  ShouldNotReachHere(); // generic vector operands not supported
  return nullptr;
}
2039
bool Matcher::is_reg2reg_move(MachNode* m) {
  // Only meaningful with generic vector operands, which this platform
  // does not use.
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}
2044
bool Matcher::is_generic_vector(MachOper* opnd) {
  // Only meaningful with generic vector operands, which this platform
  // does not use.
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}
2049
2050 // Return whether or not this register is ever used as an argument.
2051 // This function is used on startup to build the trampoline stubs in
2052 // generateOptoStub. Registers not mentioned will be killed by the VM
2053 // call in the trampoline, and arguments in those registers not be
2054 // available to the callee.
2055 bool Matcher::can_be_java_arg(int reg)
2056 {
2057 return
2058 reg == R10_num || reg == R10_H_num ||
2059 reg == R11_num || reg == R11_H_num ||
2060 reg == R12_num || reg == R12_H_num ||
2061 reg == R13_num || reg == R13_H_num ||
2062 reg == R14_num || reg == R14_H_num ||
2063 reg == R15_num || reg == R15_H_num ||
2064 reg == R16_num || reg == R16_H_num ||
2065 reg == R17_num || reg == R17_H_num ||
2066 reg == F10_num || reg == F10_H_num ||
2067 reg == F11_num || reg == F11_H_num ||
2068 reg == F12_num || reg == F12_H_num ||
2069 reg == F13_num || reg == F13_H_num ||
2070 reg == F14_num || reg == F14_H_num ||
2071 reg == F15_num || reg == F15_H_num ||
2072 reg == F16_num || reg == F16_H_num ||
2073 reg == F17_num || reg == F17_H_num;
2074 }
2075
bool Matcher::is_spillable_arg(int reg)
{
  // Any Java argument register may be spilled by the register allocator.
  return can_be_java_arg(reg);
}
2080
uint Matcher::int_pressure_limit()
{
  // Returns the integer register-pressure threshold used by the register
  // allocator; overridable via the INTPRESSURE flag (-1 means "use default").
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}
2101
uint Matcher::float_pressure_limit()
{
  // Float register-pressure threshold; overridable via the FLOATPRESSURE
  // flag (-1 means "use the full float register class size").
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
2107
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  // No hand-written assembly path for long division by constant.
  return false;
}
2111
RegMask Matcher::divI_proj_mask() {
  // Register for DIVI projection of divmodI.
  // divmodI is not matched on this platform, so this is unreachable.
  ShouldNotReachHere();
  return RegMask();
}
2116
2117 // Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  // divmodI is not matched on this platform, so this is unreachable.
  ShouldNotReachHere();
  return RegMask();
}
2122
2123 // Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  // divmodL is not matched on this platform, so this is unreachable.
  ShouldNotReachHere();
  return RegMask();
}
2128
2129 // Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  // divmodL is not matched on this platform, so this is unreachable.
  ShouldNotReachHere();
  return RegMask();
}
2134
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  // SP is saved in the frame pointer register across method-handle invokes.
  return FP_REG_mask();
}
2138
2139 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
2140 assert_cond(addp != nullptr);
2141 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2142 Node* u = addp->fast_out(i);
2143 if (u != nullptr && u->is_Mem()) {
2144 int opsize = u->as_Mem()->memory_size();
2145 assert(opsize > 0, "unexpected memory operand size");
2146 if (u->as_Mem()->memory_size() != (1 << shift)) {
2147 return false;
2148 }
2149 }
2150 }
2151 return true;
2152 }
2153
2154 // Binary src (Replicate scalar/immediate)
2155 static bool is_vector_scalar_bitwise_pattern(Node* n, Node* m) {
2156 if (n == nullptr || m == nullptr) {
2157 return false;
2158 }
2159
2160 if (m->Opcode() != Op_Replicate) {
2161 return false;
2162 }
2163
2164 switch (n->Opcode()) {
2165 case Op_AndV:
2166 case Op_OrV:
2167 case Op_XorV:
2168 case Op_AddVB:
2169 case Op_AddVS:
2170 case Op_AddVI:
2171 case Op_AddVL:
2172 case Op_SubVB:
2173 case Op_SubVS:
2174 case Op_SubVI:
2175 case Op_SubVL:
2176 case Op_MulVB:
2177 case Op_MulVS:
2178 case Op_MulVI:
2179 case Op_MulVL: {
2180 return true;
2181 }
2182 default:
2183 return false;
2184 }
2185 }
2186
2187 // (XorV src (Replicate m1))
2188 // (XorVMask src (MaskAll m1))
2189 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
2190 if (n != nullptr && m != nullptr) {
2191 return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
2192 VectorNode::is_all_ones_vector(m);
2193 }
2194 return false;
2195 }
2196
2197 // Should the Matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  // Clone input m into each use so the matcher can fold it into the
  // instruction selected for n (shift counts, replicated scalars,
  // all-ones xor masks, narrow-oop encodes feeding stores).
  assert_cond(m != nullptr);
  if (is_vshift_con_pattern(n, m) || // ShiftV src (ShiftCntV con)
      is_vector_bitwise_not_pattern(n, m) ||
      is_vector_scalar_bitwise_pattern(n, m) ||
      is_encode_and_store_pattern(n, m)) {
    mstack.push(m, Visit);
    return true;
  }
  return false;
}
2209
2210 // Should the Matcher clone shifts on addressing modes, expecting them
2211 // to be subsumed into complex addressing expressions or compute them
2212 // into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Only base+offset addressing exists on RISC-V, so defer to the shared
  // helper; no scaled-index address expressions to clone.
  return clone_base_plus_offset_address(m, mstack, address_visited);
}
2216
2217 %}
2218
2219
2220
2221 //----------ENCODING BLOCK-----------------------------------------------------
2222 // This block specifies the encoding classes used by the compiler to
2223 // output byte streams. Encoding classes are parameterized macros
2224 // used by Machine Instruction Nodes in order to generate the bit
2225 // encoding of the instruction. Operands specify their base encoding
2226 // interface with the interface keyword. There are currently
2227 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
2228 // COND_INTER. REG_INTER causes an operand to generate a function
2229 // which returns its register number when queried. CONST_INTER causes
2230 // an operand to generate a function which returns the value of the
2231 // constant when queried. MEMORY_INTER causes an operand to generate
2232 // four functions which return the Base Register, the Index Register,
2233 // the Scale Value, and the Offset Value of the operand when queried.
2234 // COND_INTER causes an operand to generate six functions which return
2235 // the encoding code (ie - encoding bits for the instruction)
2236 // associated with each basic boolean condition for a conditional
2237 // instruction.
2238 //
2239 // Instructions specify two basic values for encoding. Again, a
2240 // function is available to check if the constant displacement is an
2241 // oop. They use the ins_encode keyword to specify their encoding
2242 // classes (which must be a sequence of enc_class names, and their
2243 // parameters, specified in the encoding block), and they use the
2244 // opcode keyword to specify, in order, their primary, secondary, and
2245 // tertiary opcode. Only the opcode sections which a particular
2246 // instruction needs for encoding need to be specified.
2247 encode %{
2248 // BEGIN Non-volatile memory access
2249
  enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
    // Materialize a 32/64-bit integer constant into dst.
    int64_t con = (int64_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    __ mv(dst_reg, con);
  %}
2255
  enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
    // Materialize a pointer constant, with the relocation matching its
    // kind (oop, metadata, or plain address).
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      // null and the constant 1 have dedicated encodings
      // (riscv_enc_mov_zero / riscv_enc_mov_p1).
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        __ mv(dst_reg, $src$$constant);
      }
    }
  %}
2273
  enc_class riscv_enc_mov_p1(iRegP dst) %{
    // Materialize the pointer constant 1 (see operand immP_1; used for
    // initial object headers).
    Register dst_reg = as_Register($dst$$reg);
    __ mv(dst_reg, 1);
  %}
2278
  enc_class riscv_enc_mov_byte_map_base(iRegP dst) %{
    // Load the card table's byte_map_base (see operand immByteMapBase).
    __ load_byte_map_base($dst$$Register);
  %}
2282
  enc_class riscv_enc_mov_n(iRegN dst, immN src) %{
    // Materialize a narrow (compressed) oop constant with an oop relocation.
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      // Narrow null has its own encoding (riscv_enc_mov_zero).
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}
2294
  enc_class riscv_enc_mov_zero(iRegNorP dst) %{
    // Zero a (narrow or full) pointer register by copying from zr.
    Register dst_reg = as_Register($dst$$reg);
    __ mv(dst_reg, zr);
  %}
2299
  enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{
    // Materialize a narrow (compressed) klass constant with a metadata
    // relocation.
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
2311
2312 // compare and branch instruction encodings
2313
  enc_class riscv_enc_j(label lbl) %{
    // Unconditional jump to a label.
    Label* L = $lbl$$label;
    __ j(*L);
  %}
2318
  enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
    // Unsigned compare against zero: x >=u 0 is always true (unconditional
    // jump), x <u 0 is always false (emit nothing).
    Label* L = $lbl$$label;
    switch ($cmp$$cmpcode) {
      case(BoolTest::ge):
        __ j(*L);
        break;
      case(BoolTest::lt):
        // Never taken; fall through with no code.
        break;
      default:
        Unimplemented();
    }
  %}
2331
2332 // call instruction encodings
2333
  enc_class riscv_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result) %{
    // Slow-path subtype check via check_klass_subtype_slow_path().
    // $primary selects the result convention: when set, result_reg is
    // cleared on a hit (miss falls through to done); otherwise the
    // condition register t1 is written with 0 on hit / 1 on miss.
    Register sub_reg = as_Register($sub$$reg);
    Register super_reg = as_Register($super$$reg);
    Register temp_reg = as_Register($temp$$reg);
    Register result_reg = as_Register($result$$reg);
    Register cr_reg = t1;

    Label miss;
    Label done;
    __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss, /*set_cond_codes*/ true);
    if ($primary) {
      __ mv(result_reg, zr);
    } else {
      __ mv(cr_reg, zr);
      __ j(done);
    }

    __ bind(miss);
    if (!$primary) {
      __ mv(cr_reg, 1);
    }

    __ bind(done);
  %}
2359
  enc_class riscv_enc_java_static_call(method meth) %{
    // Static / opt-virtual Java call. Kept incompressible so the call site
    // has a fixed length and a predictable return-address offset.
    Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset

    address addr = (address)$meth$$method;
    address call = nullptr;
    assert_cond(addr != nullptr);
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ reloc_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ nop();
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      // A real Java call: pick the relocation matching the call kind.
      int method_index = resolved_method_index(masm);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ reloc_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }

      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - (__ begin()));
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();
  %}
2406
  enc_class riscv_enc_java_dynamic_call(method meth) %{
    // Virtual/interface Java call through an inline cache. Incompressible
    // so the call site has a fixed length and return-address offset.
    Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }

    __ post_call_nop();
  %}
2418
  enc_class riscv_enc_call_epilog() %{
    // Stack-depth verification after calls is not implemented on this
    // platform; trap if the debug flag is enabled.
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
2425
  enc_class riscv_enc_java_to_runtime(method meth) %{
    // Call from compiled Java into the runtime. Incompressible so the call
    // site has a fixed length and a predictable return-address offset.
    Assembler::IncompressibleScope scope(masm); // Fixed length: see ret_addr_offset

    // Some calls to generated routines (arraycopy code) are scheduled by C2
    // as runtime calls. if so we can call them using a far call (they will be
    // in the code cache, thus in a reachable segment) otherwise we have to use
    // a movptr+jalr pair which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    if (CodeCache::contains(entry)) {
      __ far_call(Address(entry, relocInfo::runtime_call_type));
      __ post_call_nop();
    } else {
      Label retaddr;
      // Make the anchor frame walkable: record the return pc in the thread
      // before transferring control outside the code cache.
      __ la(t0, retaddr);
      __ sd(t0, Address(xthread, JavaThread::last_Java_pc_offset()));
      int32_t offset = 0;
      // No relocation needed
      __ movptr(t1, entry, offset, t0); // lui + lui + slli + add
      __ jalr(t1, offset);
      __ bind(retaddr);
      __ post_call_nop();
    }
  %}
2450
  enc_class riscv_enc_tail_call(iRegP jump_target) %{
    // Tail call: jump through a register, never returning here.
    Register target_reg = as_Register($jump_target$$reg);
    __ jr(target_reg);
  %}
2455
  enc_class riscv_enc_tail_jmp(iRegP jump_target) %{
    // Tail jump used for exception forwarding.
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in x10
    // ret addr has been popped into ra
    // callee expects it in x13
    __ mv(x13, ra);
    __ jr(target_reg);
  %}
2464
  enc_class riscv_enc_rethrow() %{
    // Rethrow an exception via the shared rethrow stub.
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}
2468
  enc_class riscv_enc_ret() %{
    // Return to caller (jalr zr, ra, 0).
    __ ret();
  %}
2472
2473 %}
2474
2475 //----------FRAME--------------------------------------------------------------
2476 // Definition of frame structure and management information.
2477 //
2478 // S T A C K L A Y O U T Allocators stack-slot number
2479 // | (to get allocators register number
2480 // G Owned by | | v add OptoReg::stack0())
2481 // r CALLER | |
2482 // o | +--------+ pad to even-align allocators stack-slot
2483 // w V | pad0 | numbers; owned by CALLER
2484 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
2485 // h ^ | in | 5
2486 // | | args | 4 Holes in incoming args owned by SELF
2487 // | | | | 3
2488 // | | +--------+
2489 // V | | old out| Empty on Intel, window on Sparc
2490 // | old |preserve| Must be even aligned.
2491 // | SP-+--------+----> Matcher::_old_SP, even aligned
2492 // | | in | 3 area for Intel ret address
2493 // Owned by |preserve| Empty on Sparc.
2494 // SELF +--------+
2495 // | | pad2 | 2 pad to align old SP
2496 // | +--------+ 1
2497 // | | locks | 0
2498 // | +--------+----> OptoReg::stack0(), even aligned
2499 // | | pad1 | 11 pad to align new SP
2500 // | +--------+
2501 // | | | 10
2502 // | | spills | 9 spills
2503 // V | | 8 (pad0 slot for callee)
2504 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
2505 // ^ | out | 7
2506 // | | args | 6 Holes in outgoing args owned by CALLEE
2507 // Owned by +--------+
2508 // CALLEE | new out| 6 Empty on Intel, window on Sparc
2509 // | new |preserve| Must be even-aligned.
2510 // | SP-+--------+----> Matcher::_new_SP, even aligned
2511 // | | |
2512 //
2513 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
2514 // known from SELF's arguments and the Java calling convention.
2515 // Region 6-7 is determined per call site.
2516 // Note 2: If the calling convention leaves holes in the incoming argument
2517 // area, those holes are owned by SELF. Holes in the outgoing area
2518 // are owned by the CALLEE. Holes should not be necessary in the
2519 // incoming area, as the Java calling convention is completely under
2520 // the control of the AD file. Doubles can be sorted and packed to
2521 // avoid holes. Holes in the outgoing arguments may be necessary for
2522 // varargs C calling conventions.
2523 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
2524 // even aligned with pad0 as needed.
2525 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
2526 // (the latter is true on Intel but is it false on RISCV?)
2527 // region 6-11 is even aligned; it may be padded out more so that
2528 // the region from SP to FP meets the minimum stack alignment.
2529 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
2530 // alignment. Region 11, pad1, may be dynamically extended so that
2531 // SP meets the minimum alignment.
2532
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method* for I2C.
  inline_cache_reg(R31);

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
  // VMRegImpl::slots_per_word = wordSize / stack_slot_size = 8 / 4 = 2
  sync_stack_slots(1 * VMRegImpl::slots_per_word);

  // Compiled code's Frame Pointer
  frame_pointer(R2);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R8);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes / BytesPerInt);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values. Same as C for now.
  return_value
  %{
    // Indexed by ideal register opcode; integer/pointer values return in
    // x10 (a0), floating-point values in f10 (fa0).
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    static const int lo[Op_RegL + 1] = { // enum name
      0,            // Op_Node
      0,            // Op_Set
      R10_num,      // Op_RegN
      R10_num,      // Op_RegI
      R10_num,      // Op_RegP
      F10_num,      // Op_RegF
      F10_num,      // Op_RegD
      R10_num       // Op_RegL
    };

    static const int hi[Op_RegL + 1] = { // enum name
      0,            // Op_Node
      0,            // Op_Set
      OptoReg::Bad, // Op_RegN
      OptoReg::Bad, // Op_RegI
      R10_H_num,    // Op_RegP
      OptoReg::Bad, // Op_RegF
      F10_H_num,    // Op_RegD
      R10_H_num     // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
2608
2609 //----------ATTRIBUTES---------------------------------------------------------
2610 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);             // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32);           // Required size attribute (in bits)
ins_attrib ins_short_branch(0);    // Required flag: is this instruction
                                   // a non-matching short branch variant
                                   // of some long branch?
ins_attrib ins_alignment(4);       // Required alignment attribute (must
                                   // be a power of 2) specifies the
                                   // alignment that some part of the
                                   // instruction (not necessarily the
                                   // start) requires. If > 1, a
                                   // compute_padding() function must be
                                   // provided for the instruction

// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
2630
2631 //----------OPERANDS-----------------------------------------------------------
2632 // Operand definitions must precede instruction definitions for correct parsing
2633 // in the ADLC because operands constitute user defined types which are used in
2634 // instruction definitions.
2635
2636 //----------Simple Operands----------------------------------------------------
2637
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Integer Immediate: 6-bit int in [32, 64)
operand uimmI6_ge32() %{
  predicate(((unsigned int)(n->get_int()) < 64) && (n->get_int() >= 32));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no larger than 4
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 31 (max shift amount for 32-bit shifts)
operand immI_31()
%{
  predicate(n->get_int() == 31);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer constant 63 (max shift amount for 64-bit shifts)
operand immI_63()
%{
  predicate(n->get_int() == 63);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2738
// 32 bit integer valid for add immediate (fits addi's simm12 field)
operand immIAdd()
%{
  predicate(Assembler::is_simm12((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for sub immediate (its negation fits simm12,
// so the subtraction can be emitted as an addi of -value)
operand immISub()
%{
  predicate(Assembler::is_simm12(-(int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed int value in [-16, 15] (simm5)
operand immI5()
%{
  predicate(n->get_int() <= 15 && n->get_int() >= -16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed long value in [-16, 15] (simm5)
operand immL5()
%{
  predicate(n->get_long() <= 15 && n->get_long() >= -16);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2780
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer operands
// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Null Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
operand immByteMapBase()
%{
  // Get base of card map: matches only the card table's byte_map_base
  // pointer.  When Shenandoah is compiled in, its barrier set is
  // explicitly excluded from this match.
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            SHENANDOAHGC_ONLY(!BarrierSet::barrier_set()->is_a(BarrierSet::ShenandoahBarrierSet) &&)
            (CardTable::CardValue*)n->get_ptr() ==
            ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2851
// Int Immediate: low 16-bit mask
operand immI_16bits()
%{
  predicate(n->get_int() == 0xFFFF);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int Immediate: any power of 2 (value is treated as unsigned,
// so 0x80000000 also qualifies)
operand immIpowerOf2() %{
  predicate(is_power_of_2((juint)(n->get_int())));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}


// 64 bit integer valid for add immediate
operand immLAdd()
%{
  predicate(Assembler::is_simm12(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for sub immediate
// (the negated value must fit in a simm12; see immISub)
operand immLSub()
%{
  predicate(Assembler::is_simm12(-(n->get_long())));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2911
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Null Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
2942
// Float and Double operands
// Double Immediate
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit-pattern comparison, so -0.0d does not match)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit-pattern comparison, so -0.0f does not match)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float Immediate
operand immH()
%{
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Half Float Immediate: +0.0f.
operand immH0()
%{
  predicate(jint_cast(n->geth()) == 0);
  match(ConH);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3004
// Int immediate usable as a simm12 load/store displacement
operand immIOffset()
%{
  predicate(Assembler::is_simm12(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate usable as a simm12 load/store displacement
operand immLOffset()
%{
  predicate(Assembler::is_simm12(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Scale values
operand immIScale()
%{
  // Shift-based scale factors 1..3, i.e. element sizes 2/4/8 bytes.
  predicate(1 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3033
// Integer 32 bit Register Operands
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R10 only
operand iRegI_R10()
%{
  constraint(ALLOC_IN_RC(int_r10_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R12 only
operand iRegI_R12()
%{
  constraint(ALLOC_IN_RC(int_r12_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R13 only
operand iRegI_R13()
%{
  constraint(ALLOC_IN_RC(int_r13_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Register R14 only
operand iRegI_R14()
%{
  constraint(ALLOC_IN_RC(int_r14_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3098
// Integer 64 bit Register Operands
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike its sibling register operands this one declares
// no op_cost(0), so the ADLC default cost applies — confirm intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R10);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R29 only
operand iRegL_R29()
%{
  constraint(ALLOC_IN_RC(r29_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R30 only
operand iRegL_R30()
%{
  constraint(ALLOC_IN_RC(r30_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3141
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R10);
  match(iRegP_R15);
  match(javaThread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use fp even if
// fp is not used to hold the frame pointer.
operand iRegPNoSpNoFp()
%{
  constraint(ALLOC_IN_RC(no_special_no_fp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R11 only
operand iRegP_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R12 only
operand iRegP_R12()
%{
  constraint(ALLOC_IN_RC(r12_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R13 only
operand iRegP_R13()
%{
  constraint(ALLOC_IN_RC(r13_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R14 only
operand iRegP_R14()
%{
  constraint(ALLOC_IN_RC(r14_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R15 only
operand iRegP_R15()
%{
  constraint(ALLOC_IN_RC(r15_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R16 only
operand iRegP_R16()
%{
  constraint(ALLOC_IN_RC(r16_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R28 only
operand iRegP_R28()
%{
  constraint(ALLOC_IN_RC(r28_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R30 only
operand iRegP_R30()
%{
  constraint(ALLOC_IN_RC(r30_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R31 only
operand iRegP_R31()
%{
  constraint(ALLOC_IN_RC(r31_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3287
// Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// (fixed copy-paste comment: this is a 32-bit narrow-oop register,
// not a 64-bit integer register)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R10 only
operand iRegL_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3320
// Float Register
// Float register operands
operand fRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand fRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3344
// Generic vector class. This will be used for
// all vector operands.
operand vReg()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register vector operands V1..V11, used by instructions that
// require a specific vector register.
operand vReg_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V8()
%{
  constraint(ALLOC_IN_RC(v8_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V9()
%{
  constraint(ALLOC_IN_RC(v9_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V10()
%{
  constraint(ALLOC_IN_RC(v10_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vReg_V11()
%{
  constraint(ALLOC_IN_RC(v11_reg));
  match(VecA);
  match(vReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3465
// Vector mask register operand
operand vRegMask()
%{
  constraint(ALLOC_IN_RC(vmask_reg));
  match(RegVectMask);
  match(vRegMask_V0);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// The mask value used to control execution of a masked
// vector instruction is always supplied by vector register v0.
operand vRegMask_V0()
%{
  constraint(ALLOC_IN_RC(vmask_reg_v0));
  match(RegVectMask);
  match(vRegMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Java Thread Register
operand javaThread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(java_thread_reg)); // java_thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3497
//----------Memory Operands----------------------------------------------------
// RISCV has only base_plus_offset and literal address mode, so no need to use
// index and scale. Here set index as 0xffffffff and scale as 0x0.

// [base] with zero displacement
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base + simm12] with an int-typed offset
operand indOffI(iRegP reg, immIOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base + simm12] with a long-typed offset
operand indOffL(iRegP reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [narrow base] — only valid when compressed oops need no decode shift
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [narrow base + simm12] with an int-typed offset
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [narrow base + simm12] with a long-typed offset
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
3587
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);        // sp (x2)
    index(0xffffffff); // No Index
    scale(0x0);        // No Scale
    disp($reg);        // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);        // sp (x2)
    index(0xffffffff); // No Index
    scale(0x0);        // No Scale
    disp($reg);        // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);        // sp (x2)
    index(0xffffffff); // No Index
    scale(0x0);        // No Scale
    disp($reg);        // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x02);        // sp (x2)
    index(0xffffffff); // No Index
    scale(0x0);        // No Scale
    disp($reg);        // Stack Offset
  %}
%}
3647
// Special operand allowing long args to int ops to be truncated for free

operand iRegL2I(iRegL reg) %{

  op_cost(0);

  // Match a ConvL2I wrapped around a long register: the 32-bit consumer
  // reads the low half directly, eliding the explicit l2i instruction.
  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}
3660
3661
// Comparison Operands
// NOTE: Label is a predefined operand which should not be redefined in
// the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.


// used for signed integral comparisons and fp comparisons
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}

  // the values in interface derive from struct BoolTest::mask
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gt");
    overflow(0x2, "overflow");
    less(0x3, "lt");
    not_equal(0x4, "ne");
    less_equal(0x5, "le");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "ge");
  %}
%}

// used for unsigned integral comparisons
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  // the values in interface derive from struct BoolTest::mask
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}

// used for certain integral comparisons which can be
// converted to bxx instructions
// (only matches Bool nodes testing eq or ne)
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gt");
    overflow(0x2, "overflow");
    less(0x3, "lt");
    not_equal(0x4, "ne");
    less_equal(0x5, "le");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "ge");
  %}
%}

// unsigned comparison restricted to lt/ge tests
operand cmpOpULtGe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::lt ||
            n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}

// unsigned comparison restricted to eq/ne/le/gt tests
operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    greater(0x1, "gtu");
    overflow(0x2, "overflow");
    less(0x3, "ltu");
    not_equal(0x4, "ne");
    less_equal(0x5, "leu");
    no_overflow(0x6, "no_overflow");
    greater_equal(0x7, "geu");
  %}
%}
3783
3784
// Flags register, used as output of compare logic
// NOTE(review): RISC-V has no architectural condition-code register;
// presumably this is a virtual register for the matcher — confirm.
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(reg_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
3808
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.

// memory is used to define read/write location for load/store
// instruction defs. we can turn a memory op into an Address

opclass memory(indirect, indOffI, indOffL, indirectN, indOffIN, indOffLN);

// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. it allows the src to be either an iRegI or a (ConvL2I
// iRegL). in the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. if the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes so an l2i gets planted
// (actually an addiw $dst, $src, 0) and the downstream instructions
// consume the result of the L2I as an iRegI input. That's a shame since
// the addiw is actually redundant but its not too costly.

opclass iRegIorL2I(iRegI, iRegL2I);
opclass iRegIorL(iRegI, iRegL);
opclass iRegNorP(iRegN, iRegP);
opclass iRegILNP(iRegI, iRegL, iRegN, iRegP);
opclass iRegILNPNoSp(iRegINoSp, iRegLNoSp, iRegNNoSp, iRegPNoSp);
opclass immIorL(immI, immL);
3840
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

// For specific pipelines, e.g. generic RISC-V, define the stages of that pipeline
//pipe_desc(ID, EX, MEM, WR);
// Symbolic names for the first four stages of the generic 6-stage
// pipe_desc below: decode, execute, memory access, write-back.
#define ID   S0
#define EX   S1
#define MEM  S2
#define WR   S3
3850
3851 // Integer ALU reg operation
3852 pipeline %{
3853
attributes %{
  // RISC-V instructions are of fixed length
  fixed_size_instructions;         // Fixed size instructions (TODO(review): original note truncated — confirm intent)
  max_instructions_per_bundle = 2; // Generic RISC-V 1, Sifive Series 7 2
  // RISC-V instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
3866
// We don't use an actual pipeline model so don't care about resources
// or description. we do use pipeline classes to introduce fixed
// latencies

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// Generic RISC-V pipeline
// 1 decoder
// 1 instruction decoded per cycle
// 1 load/store ops per cycle, 1 branch, 1 FPU
// 1 mul, 1 div

// Functional-unit tokens referenced by the pipe_class definitions below.
resources ( DECODE,
            ALU,
            MUL,
            DIV,
            BRANCH,
            LDST,
            FPU);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
3893
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Single precision FP binary op (reads src1/src2, writes dst in S5)
pipe_class fp_dop_reg_reg_s(fRegF dst, fRegF src1, fRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}
3907
// Double precision FP binary op (reads src1/src2, writes dst in S5).
// Fix: declare single_instruction, which every other pipe_class in this
// file (including the single-precision twin fp_dop_reg_reg_s) specifies;
// it was missing here, leaving the bundling attribute at its default.
pipe_class fp_dop_reg_reg_d(fRegD dst, fRegD src1, fRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}
3916
// FP unary ops (single / double precision)
pipe_class fp_uop_s(fRegF dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_uop_d(fRegD dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// FP precision conversions
pipe_class fp_d2f(fRegF dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_f2d(fRegD dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// FP <-> integer conversions
pipe_class fp_f2i(iRegINoSp dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_f2l(iRegLNoSp dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_i2f(fRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_l2f(fRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_d2i(iRegINoSp dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_d2l(iRegLNoSp dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_i2d(fRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_l2d(fRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// FP divide / square root
pipe_class fp_div_s(fRegF dst, fRegF src1, fRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_div_d(fRegD dst, fRegD src1, fRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_sqrt_s(fRegF dst, fRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_sqrt_d(fRegD dst, fRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// FP constant materialization
pipe_class fp_load_constant_s(fRegF dst)
%{
  single_instruction;
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

pipe_class fp_load_constant_d(fRegD dst)
%{
  single_instruction;
  dst    : S5(write);
  DECODE : ID;
  FPU    : S5;
%}

// FP loads and stores (use the LDST unit in the MEM stage)
pipe_class fp_load_mem_s(fRegF dst, memory mem)
%{
  single_instruction;
  mem    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

pipe_class fp_load_mem_d(fRegD dst, memory mem)
%{
  single_instruction;
  mem    : S1(read);
  dst    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

pipe_class fp_store_reg_s(fRegF src, memory mem)
%{
  single_instruction;
  src    : S1(read);
  mem    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}

pipe_class fp_store_reg_d(fRegD src, memory mem)
%{
  single_instruction;
  src    : S1(read);
  mem    : S5(write);
  DECODE : ID;
  LDST   : MEM;
%}
4114
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needs in ID, result generated in EX
// E.g. ADD   Rd, Rs1, Rs2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg operation with constant shift
// E.g. SLLI    Rd, Rs1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg-reg operation with variable shift
// both operands must be available in ID
// E.g. SLL   Rd, Rs1, Rs2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  src2   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg operation
// E.g. NEG   Rd, Rs2
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX(write);
  src    : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU reg immediate operation
// E.g. ADDI    Rd, Rs1, #imm
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX(write);
  src1   : ID(read);
  DECODE : ID;
  ALU    : EX;
%}

// Integer ALU immediate operation (no source operands)
// E.g. LI    Rd, #imm
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX(write);
  DECODE : ID;
  ALU    : EX;
%}
4185
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32-bit); default latency, result in WR via the MUL unit
// E.g. MULW Rd, Rs1, Rs2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst : WR(write);
  src1 : ID(read);
  src2 : ID(read);
  DECODE : ID;
  MUL : WR;
%}

// E.g. MUL RD, Rs1, Rs2
pipe_class lmul_reg_reg(iRegL dst, iRegL src1, iRegL src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst : WR(write);
  src1 : ID(read);
  src2 : ID(read);
  DECODE : ID;
  MUL : WR;
%}

//------- Divide pipeline operations --------------------

// E.g. DIVW Rd, Rs1, Rs2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst : WR(write);
  src1 : ID(read);
  src2 : ID(read);
  DECODE : ID;
  DIV : WR;
%}

// E.g. DIV RD, Rs1, Rs2
pipe_class ldiv_reg_reg(iRegL dst, iRegL src1, iRegL src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst : WR(write);
  src1 : ID(read);
  src2 : ID(read);
  DECODE : ID;
  DIV : WR;
%}
4237
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg. PREFETCH_W mem
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem : ID(read);
  DECODE : ID;
  LDST : MEM;
%}

// Load - reg, mem
// E.g. LA Rd, mem
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst : WR(write);
  mem : ID(read);
  DECODE : ID;
  LDST : MEM;
%}

// Load - reg, reg
// E.g. LD Rd, Rs
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst : WR(write);
  src : ID(read);
  DECODE : ID;
  LDST : MEM;
%}

//------- Store pipeline operations -----------------------

// Store - zr, mem
// E.g. SD zr, mem
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem : ID(read);
  DECODE : ID;
  LDST : MEM;
%}

// Store - reg, mem
// E.g. SD Rs, mem
// Address resolved in ID, source data needed by EX.
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem : ID(read);
  src : EX(read);
  DECODE : ID;
  LDST : MEM;
%}

// Store - reg, reg
// E.g. SD Rs2, Rs1
// Here 'dst' is the address register (read, not written).
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst : ID(read);
  src : EX(read);
  DECODE : ID;
  LDST : MEM;
%}
4305
//------- Control transfer pipeline operations ------------

// Unconditional branch, no register operands
pipe_class pipe_branch()
%{
  single_instruction;
  DECODE : ID;
  BRANCH : EX;
%}

// Indirect branch through a register
pipe_class pipe_branch_reg(iRegI src)
%{
  single_instruction;
  src : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}

// Compare & Branch
// E.g. BEQ Rs1, Rs2, L
pipe_class pipe_cmp_branch(iRegI src1, iRegI src2)
%{
  single_instruction;
  src1 : ID(read);
  src2 : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}

// Compare-against-zero & branch
// E.g. BEQZ Rs, L
pipe_class pipe_cmpz_branch(iRegI src)
%{
  single_instruction;
  src : ID(read);
  DECODE : ID;
  BRANCH : EX;
%}
4344
//------- Synchronisation operations ----------------------
// Any operation requiring serialization
// E.g. FENCE/Atomic Ops/Load Acquire/Store Release
pipe_class pipe_serial()
%{
  single_instruction;
  force_serialization;
  fixed_latency(16);
  DECODE : ID;
  LDST : MEM;
%}

// Generic "expensive multi-instruction sequence" class; used by the CAS and
// other complex instructs below.
pipe_class pipe_slow()
%{
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  DECODE : ID;
  LDST : MEM;
%}

// The real do-nothing guy
pipe_class real_empty()
%{
  instruction_count(0);
%}

// Empty pipeline class
pipe_class pipe_class_empty()
%{
  single_instruction;
  fixed_latency(0);
%}

// Default pipeline class.
pipe_class pipe_class_default()
%{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
  MachNop = pipe_class_empty;
%}
4412 %}
4413 //----------INSTRUCTIONS-------------------------------------------------------
4414 //
4415 // match -- States which machine-independent subtree may be replaced
4416 // by this instruction.
4417 // ins_cost -- The estimated cost of this instruction is used by instruction
4418 // selection to identify a minimum cost tree of machine
4419 // instructions that matches a tree of machine-independent
4420 // instructions.
4421 // format -- A string providing the disassembly for this instruction.
4422 // The value of an instruction's operand may be inserted
4423 // by referring to it with a '$' prefix.
4424 // opcode -- Three instruction opcodes may be provided. These are referred
4425 // to within an encode class as $primary, $secondary, and $tertiary
//               respectively. The primary opcode is commonly used to
4427 // indicate the type of machine instruction, while secondary
4428 // and tertiary are often used for prefix options or addressing
4429 // modes.
4430 // ins_encode -- A list of encode classes with parameters. The encode class
4431 // name must have been defined in an 'enc_class' specification
4432 // in the encode section of the architecture description.
4433
4434 // ============================================================================
4435 // Memory (Load/Store) Instructions
4436
4437 // Load Instructions
4438
4439 // Load Byte (8 bit signed)
4440 instruct loadB(iRegINoSp dst, memory mem)
4441 %{
4442 match(Set dst (LoadB mem));
4443
4444 ins_cost(LOAD_COST);
4445 format %{ "lb $dst, $mem\t# byte, #@loadB" %}
4446
4447 ins_encode %{
4448 __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4449 %}
4450
4451 ins_pipe(iload_reg_mem);
4452 %}
4453
4454 // Load Byte (8 bit signed) into long
4455 instruct loadB2L(iRegLNoSp dst, memory mem)
4456 %{
4457 match(Set dst (ConvI2L (LoadB mem)));
4458
4459 ins_cost(LOAD_COST);
4460 format %{ "lb $dst, $mem\t# byte, #@loadB2L" %}
4461
4462 ins_encode %{
4463 __ lb(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4464 %}
4465
4466 ins_pipe(iload_reg_mem);
4467 %}
4468
4469 // Load Byte (8 bit unsigned)
4470 instruct loadUB(iRegINoSp dst, memory mem)
4471 %{
4472 match(Set dst (LoadUB mem));
4473
4474 ins_cost(LOAD_COST);
4475 format %{ "lbu $dst, $mem\t# byte, #@loadUB" %}
4476
4477 ins_encode %{
4478 __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4479 %}
4480
4481 ins_pipe(iload_reg_mem);
4482 %}
4483
4484 // Load Byte (8 bit unsigned) into long
4485 instruct loadUB2L(iRegLNoSp dst, memory mem)
4486 %{
4487 match(Set dst (ConvI2L (LoadUB mem)));
4488
4489 ins_cost(LOAD_COST);
4490 format %{ "lbu $dst, $mem\t# byte, #@loadUB2L" %}
4491
4492 ins_encode %{
4493 __ lbu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4494 %}
4495
4496 ins_pipe(iload_reg_mem);
4497 %}
4498
4499 // Load Short (16 bit signed)
4500 instruct loadS(iRegINoSp dst, memory mem)
4501 %{
4502 match(Set dst (LoadS mem));
4503
4504 ins_cost(LOAD_COST);
4505 format %{ "lh $dst, $mem\t# short, #@loadS" %}
4506
4507 ins_encode %{
4508 __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4509 %}
4510
4511 ins_pipe(iload_reg_mem);
4512 %}
4513
4514 // Load Short (16 bit signed) into long
4515 instruct loadS2L(iRegLNoSp dst, memory mem)
4516 %{
4517 match(Set dst (ConvI2L (LoadS mem)));
4518
4519 ins_cost(LOAD_COST);
4520 format %{ "lh $dst, $mem\t# short, #@loadS2L" %}
4521
4522 ins_encode %{
4523 __ lh(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4524 %}
4525
4526 ins_pipe(iload_reg_mem);
4527 %}
4528
4529 // Load Char (16 bit unsigned)
4530 instruct loadUS(iRegINoSp dst, memory mem)
4531 %{
4532 match(Set dst (LoadUS mem));
4533
4534 ins_cost(LOAD_COST);
4535 format %{ "lhu $dst, $mem\t# short, #@loadUS" %}
4536
4537 ins_encode %{
4538 __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4539 %}
4540
4541 ins_pipe(iload_reg_mem);
4542 %}
4543
4544 // Load Short/Char (16 bit unsigned) into long
4545 instruct loadUS2L(iRegLNoSp dst, memory mem)
4546 %{
4547 match(Set dst (ConvI2L (LoadUS mem)));
4548
4549 ins_cost(LOAD_COST);
4550 format %{ "lhu $dst, $mem\t# short, #@loadUS2L" %}
4551
4552 ins_encode %{
4553 __ lhu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4554 %}
4555
4556 ins_pipe(iload_reg_mem);
4557 %}
4558
4559 // Load Integer (32 bit signed)
4560 instruct loadI(iRegINoSp dst, memory mem)
4561 %{
4562 match(Set dst (LoadI mem));
4563
4564 ins_cost(LOAD_COST);
4565 format %{ "lw $dst, $mem\t# int, #@loadI" %}
4566
4567 ins_encode %{
4568 __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4569 %}
4570
4571 ins_pipe(iload_reg_mem);
4572 %}
4573
4574 // Load Integer (32 bit signed) into long
4575 instruct loadI2L(iRegLNoSp dst, memory mem)
4576 %{
4577 match(Set dst (ConvI2L (LoadI mem)));
4578
4579 ins_cost(LOAD_COST);
4580 format %{ "lw $dst, $mem\t# int, #@loadI2L" %}
4581
4582 ins_encode %{
4583 __ lw(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4584 %}
4585
4586 ins_pipe(iload_reg_mem);
4587 %}
4588
4589 // Load Integer (32 bit unsigned) into long
4590 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
4591 %{
4592 match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
4593
4594 ins_cost(LOAD_COST);
4595 format %{ "lwu $dst, $mem\t# int, #@loadUI2L" %}
4596
4597 ins_encode %{
4598 __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4599 %}
4600
4601 ins_pipe(iload_reg_mem);
4602 %}
4603
4604 // Load Long (64 bit signed)
4605 instruct loadL(iRegLNoSp dst, memory mem)
4606 %{
4607 match(Set dst (LoadL mem));
4608
4609 ins_cost(LOAD_COST);
4610 format %{ "ld $dst, $mem\t# int, #@loadL" %}
4611
4612 ins_encode %{
4613 __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4614 %}
4615
4616 ins_pipe(iload_reg_mem);
4617 %}
4618
4619 // Load Range
4620 instruct loadRange(iRegINoSp dst, memory mem)
4621 %{
4622 match(Set dst (LoadRange mem));
4623
4624 ins_cost(LOAD_COST);
4625 format %{ "lwu $dst, $mem\t# range, #@loadRange" %}
4626
4627 ins_encode %{
4628 __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4629 %}
4630
4631 ins_pipe(iload_reg_mem);
4632 %}
4633
4634 // Load Pointer
4635 instruct loadP(iRegPNoSp dst, memory mem)
4636 %{
4637 match(Set dst (LoadP mem));
4638 predicate(n->as_Load()->barrier_data() == 0);
4639
4640 ins_cost(LOAD_COST);
4641 format %{ "ld $dst, $mem\t# ptr, #@loadP" %}
4642
4643 ins_encode %{
4644 __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4645 %}
4646
4647 ins_pipe(iload_reg_mem);
4648 %}
4649
4650 // Load Compressed Pointer
4651 instruct loadN(iRegNNoSp dst, memory mem)
4652 %{
4653 predicate(n->as_Load()->barrier_data() == 0);
4654 match(Set dst (LoadN mem));
4655
4656 ins_cost(LOAD_COST);
4657 format %{ "lwu $dst, $mem\t# compressed ptr, #@loadN" %}
4658
4659 ins_encode %{
4660 __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4661 %}
4662
4663 ins_pipe(iload_reg_mem);
4664 %}
4665
4666 // Load Klass Pointer
4667 instruct loadKlass(iRegPNoSp dst, memory mem)
4668 %{
4669 match(Set dst (LoadKlass mem));
4670
4671 ins_cost(LOAD_COST);
4672 format %{ "ld $dst, $mem\t# class, #@loadKlass" %}
4673
4674 ins_encode %{
4675 __ ld(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4676 %}
4677
4678 ins_pipe(iload_reg_mem);
4679 %}
4680
4681 // Load Narrow Klass Pointer
4682 instruct loadNKlass(iRegNNoSp dst, memory mem)
4683 %{
4684 predicate(!UseCompactObjectHeaders);
4685 match(Set dst (LoadNKlass mem));
4686
4687 ins_cost(LOAD_COST);
4688 format %{ "lwu $dst, $mem\t# compressed class ptr, #@loadNKlass" %}
4689
4690 ins_encode %{
4691 __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4692 %}
4693
4694 ins_pipe(iload_reg_mem);
4695 %}
4696
4697 instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory mem)
4698 %{
4699 predicate(UseCompactObjectHeaders);
4700 match(Set dst (LoadNKlass mem));
4701
4702 ins_cost(LOAD_COST);
4703 format %{
4704 "lwu $dst, $mem\t# compressed klass ptr, shifted\n\t"
4705 "srli $dst, $dst, markWord::klass_shift_at_offset"
4706 %}
4707
4708 ins_encode %{
4709 __ lwu(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4710 __ srli(as_Register($dst$$reg), as_Register($dst$$reg), (unsigned) markWord::klass_shift_at_offset);
4711 %}
4712
4713 ins_pipe(iload_reg_mem);
4714 %}
4715
4716 // Load Float
4717 instruct loadF(fRegF dst, memory mem)
4718 %{
4719 match(Set dst (LoadF mem));
4720
4721 ins_cost(LOAD_COST);
4722 format %{ "flw $dst, $mem\t# float, #@loadF" %}
4723
4724 ins_encode %{
4725 __ flw(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4726 %}
4727
4728 ins_pipe(fp_load_mem_s);
4729 %}
4730
4731 // Load Double
4732 instruct loadD(fRegD dst, memory mem)
4733 %{
4734 match(Set dst (LoadD mem));
4735
4736 ins_cost(LOAD_COST);
4737 format %{ "fld $dst, $mem\t# double, #@loadD" %}
4738
4739 ins_encode %{
4740 __ fld(as_FloatRegister($dst$$reg), Address(as_Register($mem$$base), $mem$$disp));
4741 %}
4742
4743 ins_pipe(fp_load_mem_d);
4744 %}
4745
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# int, #@loadConI" %}

  ins_encode(riscv_enc_mov_imm(dst, src));

  ins_pipe(ialu_imm);
%}

// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# long, #@loadConL" %}

  ins_encode(riscv_enc_mov_imm(dst, src));

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# ptr, #@loadConP" %}

  ins_encode(riscv_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# null pointer, #@loadConP0" %}

  ins_encode(riscv_enc_mov_zero(dst));

  ins_pipe(ialu_imm);
%}

// Load Pointer Constant One
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# load ptr constant one, #@loadConP1" %}

  ins_encode(riscv_enc_mov_p1(dst));

  ins_pipe(ialu_imm);
%}

// Load Byte Map Base Constant (card-table base)
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);
  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# Byte Map Base, #@loadByteMapBase" %}

  ins_encode(riscv_enc_mov_byte_map_base(dst));

  ins_pipe(ialu_imm);
%}

// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(ALU_COST * 4);
  format %{ "mv $dst, $con\t# compressed ptr, #@loadConN" %}

  ins_encode(riscv_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(ALU_COST);
  format %{ "mv $dst, $con\t# compressed null pointer, #@loadConN0" %}

  ins_encode(riscv_enc_mov_zero(dst));

  ins_pipe(ialu_imm);
%}

// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(ALU_COST * 6);
  format %{ "mv $dst, $con\t# compressed klass ptr, #@loadConNKlass" %}

  ins_encode(riscv_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}

// Load Half Float Constant
// Prefers the Zfa fli.h immediate form when the constant is encodable,
// otherwise loads from the constant table.
instruct loadConH(fRegF dst, immH con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "flh $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConH"
  %}

  ins_encode %{
    assert(UseZfh || UseZfhmin, "must");
    if (MacroAssembler::can_hf_imm_load($con$$constant)) {
      __ fli_h(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ flh(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_s);
%}

// Half-float zero: just move integer zero register into the FP register.
instruct loadConH0(fRegF dst, immH0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.h.x $dst, zr\t# float, #@loadConH0" %}

  ins_encode %{
    assert(UseZfh || UseZfhmin, "must");
    __ fmv_h_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Float Constant
instruct loadConF(fRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "flw $dst, [$constantaddress]\t# load from constant table: float=$con, #@loadConF"
  %}

  ins_encode %{
    if (MacroAssembler::can_fp_imm_load($con$$constant)) {
      __ fli_s(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_s);
%}

instruct loadConF0(fRegF dst, immF0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.w.x $dst, zr\t# float, #@loadConF0" %}

  ins_encode %{
    __ fmv_w_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_s);
%}

// Load Double Constant
instruct loadConD(fRegD dst, immD con) %{
  match(Set dst con);

  ins_cost(LOAD_COST);
  format %{
    "fld $dst, [$constantaddress]\t# load from constant table: double=$con, #@loadConD"
  %}

  ins_encode %{
    if (MacroAssembler::can_dp_imm_load($con$$constant)) {
      __ fli_d(as_FloatRegister($dst$$reg), $con$$constant);
    } else {
      __ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
    }
  %}

  ins_pipe(fp_load_constant_d);
%}

instruct loadConD0(fRegD dst, immD0 con) %{
  match(Set dst con);

  ins_cost(XFER_COST);

  format %{ "fmv.d.x $dst, zr\t# double, #@loadConD0" %}

  ins_encode %{
    __ fmv_d_x(as_FloatRegister($dst$$reg), zr);
  %}

  ins_pipe(fp_load_constant_d);
%}
4965
// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(STORE_COST);
  format %{ "sb  $src, $mem\t# byte, #@storeB" %}

  ins_encode %{
    __ sb(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}

// Store zero byte: uses the hardwired zero register, no source operand.
instruct storeimmB0(immI0 zero, memory mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(STORE_COST);
  format %{ "sb zr, $mem\t# byte, #@storeimmB0" %}

  ins_encode %{
    __ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}

// Store Char/Short
instruct storeC(iRegIorL2I src, memory mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(STORE_COST);
  format %{ "sh  $src, $mem\t# short, #@storeC" %}

  ins_encode %{
    __ sh(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}

instruct storeimmC0(immI0 zero, memory mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(STORE_COST);
  format %{ "sh  zr, $mem\t# short, #@storeimmC0" %}

  ins_encode %{
    __ sh(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
5023
// Store Integer
instruct storeI(iRegIorL2I src, memory mem)
%{
  // Space after 'mem' restored for consistency with every other match clause.
  match(Set mem (StoreI mem src));

  ins_cost(STORE_COST);
  format %{ "sw  $src, $mem\t# int, #@storeI" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
5038
// Store zero int via the hardwired zero register.
instruct storeimmI0(immI0 zero, memory mem)
%{
  // Space after 'mem' restored for consistency with every other match clause.
  match(Set mem (StoreI mem zero));

  ins_cost(STORE_COST);
  format %{ "sw  zr, $mem\t# int, #@storeimmI0" %}

  ins_encode %{
    __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
5052
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory mem)
%{
  match(Set mem (StoreL mem src));

  ins_cost(STORE_COST);
  format %{ "sd  $src, $mem\t# long, #@storeL" %}

  ins_encode %{
    __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}

// Store zero long via the hardwired zero register.
instruct storeimmL0(immL0 zero, memory mem)
%{
  match(Set mem (StoreL mem zero));

  ins_cost(STORE_COST);
  format %{ "sd  zr, $mem\t# long, #@storeimmL0" %}

  ins_encode %{
    __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}

// Store Pointer
// Only when no GC barrier is required (barrier_data() == 0).
instruct storeP(iRegP src, memory mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(STORE_COST);
  format %{ "sd  $src, $mem\t# ptr, #@storeP" %}

  ins_encode %{
    __ sd(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}

// Store null pointer
instruct storeimmP0(immP0 zero, memory mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(STORE_COST);
  format %{ "sd  zr, $mem\t# ptr, #@storeimmP0" %}

  ins_encode %{
    __ sd(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_mem);
%}
5114
// Store Compressed Pointer
// Only when no GC barrier is required (barrier_data() == 0).
instruct storeN(iRegN src, memory mem)
%{
  predicate(n->as_Store()->barrier_data() == 0);
  match(Set mem (StoreN mem src));

  ins_cost(STORE_COST);
  format %{ "sw  $src, $mem\t# compressed ptr, #@storeN" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
5130
// Store compressed null pointer via the hardwired zero register.
instruct storeImmN0(immN0 zero, memory mem)
%{
  predicate(n->as_Store()->barrier_data() == 0);
  match(Set mem (StoreN mem zero));

  ins_cost(STORE_COST);
  format %{ "sw  zr, $mem\t# compressed ptr, #@storeImmN0" %}

  ins_encode %{
    __ sw(zr, Address(as_Register($mem$$base), $mem$$disp));
  %}

  // No source register operand (stores zr), so use istore_mem like the other
  // zero-store rules (storeimmB0/C0/I0/L0/P0); was istore_reg_mem.
  ins_pipe(istore_mem);
%}
5145
// Store Float
instruct storeF(fRegF src, memory mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(STORE_COST);
  format %{ "fsw  $src, $mem\t# float, #@storeF" %}

  ins_encode %{
    __ fsw(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_store_reg_s);
%}

// Store Double
instruct storeD(fRegD src, memory mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(STORE_COST);
  format %{ "fsd  $src, $mem\t# double, #@storeD" %}

  ins_encode %{
    __ fsd(as_FloatRegister($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(fp_store_reg_d);
%}

// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory mem)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(STORE_COST);
  format %{ "sw  $src, $mem\t# compressed klass ptr, #@storeNKlass" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(as_Register($mem$$base), $mem$$disp));
  %}

  ins_pipe(istore_reg_mem);
%}
5190
5191 // ============================================================================
5192 // Prefetch instructions
5193 // Must be safe to execute with invalid address (cannot fault).
5194
5195 instruct prefetchalloc( memory mem ) %{
5196 predicate(UseZicbop);
5197 match(PrefetchAllocation mem);
5198
5199 ins_cost(ALU_COST * 1);
5200 format %{ "prefetch_w $mem\t# Prefetch for write" %}
5201
5202 ins_encode %{
5203 if (Assembler::is_simm12($mem$$disp)) {
5204 if (($mem$$disp & 0x1f) == 0) {
5205 __ prefetch_w(as_Register($mem$$base), $mem$$disp);
5206 } else {
5207 __ addi(t0, as_Register($mem$$base), $mem$$disp);
5208 __ prefetch_w(t0, 0);
5209 }
5210 } else {
5211 __ mv(t0, $mem$$disp);
5212 __ add(t0, as_Register($mem$$base), t0);
5213 __ prefetch_w(t0, 0);
5214 }
5215 %}
5216
5217 ins_pipe(iload_prefetch);
5218 %}
5219
5220 // ============================================================================
5221 // Atomic operation instructions
5222 //
5223
5224 // standard CompareAndSwapX when we are using barriers
5225 // these have higher priority than the rules selected by a predicate
5226 instruct compareAndSwapB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
5227 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
5228 %{
5229 predicate(!UseZabha || !UseZacas);
5230
5231 match(Set res (CompareAndSwapB mem (Binary oldval newval)));
5232
5233 ins_cost(2 * VOLATILE_REF_COST);
5234
5235 effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
5236
5237 format %{
5238 "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
5239 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB_narrow"
5240 %}
5241
5242 ins_encode %{
5243 __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
5244 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
5245 true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
5246 %}
5247
5248 ins_pipe(pipe_slow);
5249 %}
5250
5251 instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
5252 %{
5253 predicate(UseZabha && UseZacas);
5254
5255 match(Set res (CompareAndSwapB mem (Binary oldval newval)));
5256
5257 ins_cost(2 * VOLATILE_REF_COST);
5258
5259 format %{
5260 "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
5261 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB"
5262 %}
5263
5264 ins_encode %{
5265 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
5266 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
5267 true /* result as bool */);
5268 %}
5269
5270 ins_pipe(pipe_slow);
5271 %}
5272
5273 instruct compareAndSwapS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
5274 iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
5275 %{
5276 predicate(!UseZabha || !UseZacas);
5277
5278 match(Set res (CompareAndSwapS mem (Binary oldval newval)));
5279
5280 ins_cost(2 * VOLATILE_REF_COST);
5281
5282 effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
5283
5284 format %{
5285 "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
5286 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS_narrow"
5287 %}
5288
5289 ins_encode %{
5290 __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
5291 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
5292 true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
5293 %}
5294
5295 ins_pipe(pipe_slow);
5296 %}
5297
5298 instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
5299 %{
5300 predicate(UseZabha && UseZacas);
5301
5302 match(Set res (CompareAndSwapS mem (Binary oldval newval)));
5303
5304 ins_cost(2 * VOLATILE_REF_COST);
5305
5306 format %{
5307 "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
5308 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS"
5309 %}
5310
5311 ins_encode %{
5312 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
5313 Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
5314 true /* result as bool */);
5315 %}
5316
5317 ins_pipe(pipe_slow);
5318 %}
5319
5320 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
5321 %{
5322 match(Set res (CompareAndSwapI mem (Binary oldval newval)));
5323
5324 ins_cost(2 * VOLATILE_REF_COST);
5325
5326 format %{
5327 "cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
5328 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapI"
5329 %}
5330
5331 ins_encode %{
5332 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
5333 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
5334 /*result as bool*/ true);
5335 %}
5336
5337 ins_pipe(pipe_slow);
5338 %}
5339
5340 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
5341 %{
5342 match(Set res (CompareAndSwapL mem (Binary oldval newval)));
5343
5344 ins_cost(2 * VOLATILE_REF_COST);
5345
5346 format %{
5347 "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
5348 "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL"
5349 %}
5350
5351 ins_encode %{
5352 __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
5353 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
5354 /*result as bool*/ true);
5355 %}
5356
5357 ins_pipe(pipe_slow);
5358 %}
5359
// Strong CAS on a pointer field (CompareAndSwapP).  Only matches when the
// GC barrier data is zero, i.e. no GC-specific barrier expansion is needed.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP"
  %}

  ins_encode %{
    // Pointers are 64-bit, hence Assembler::int64.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5381
// Strong CAS on a narrow (compressed) oop field (CompareAndSwapN).
// Only matches when the GC barrier data is zero.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN"
  %}

  ins_encode %{
    // Narrow oops are 32-bit zero-extended values, hence uint32.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5403
5404 // alternative CompareAndSwapX when we are eliding barriers
// Acquiring strong CAS on a byte field, LR/SC-based narrow emulation.
// Used when Zabha/Zacas sub-word atomics are unavailable: the byte is
// CAS'ed inside its containing word, which needs fixed input registers
// (R12/R13), three temps and clobbers the flags register.
instruct compareAndSwapBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                   iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                            Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
                            true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5429
// Acquiring strong CAS on a byte field, direct form.  Requires both Zabha
// (byte/half atomics) and Zacas (amocas) extensions.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
               Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
               true /* result as bool */);
  %}

  ins_pipe(pipe_slow);
%}
5451
// Acquiring strong CAS on a short field, LR/SC-based narrow emulation
// (no Zabha/Zacas).  Needs fixed R12/R13 inputs, three temps, kills flags.
instruct compareAndSwapSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                   iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                            Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
                            true /* result as bool */, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5476
// Acquiring strong CAS on a short field, direct form (Zabha && Zacas).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
               Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
               true /* result as bool */);
  %}

  ins_pipe(pipe_slow);
%}
5498
// Acquiring strong CAS on an int field: matched when the CAS needs
// acquire semantics (needs_acquiring_load_reserved), encoded with aq+rl.
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5520
// Acquiring strong CAS on a long field (aq+rl ordering bits).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5542
// Acquiring strong CAS on a pointer field; only when GC barrier data is 0.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5564
// Acquiring strong CAS on a narrow oop field; only when barrier data is 0.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
    "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq"
  %}

  ins_encode %{
    // Narrow oops are 32-bit zero-extended values, hence uint32.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
               /*result as bool*/ true);
  %}

  ins_pipe(pipe_slow);
%}
5586
5587 // Sundry CAS operations. Note that release is always true,
5588 // regardless of the memory ordering of the CAS. This is because we
5589 // need the volatile case to be sequentially consistent but there is
5590 // no trailing StoreLoad barrier emitted by C2. Unfortunately we
5591 // can't check the type of memory ordering here, so we always emit a
5592 // sc_d(w) with rl bit set.
// Strong compare-and-exchange on a byte field, LR/SC-based narrow emulation
// (no Zabha/Zacas).  Unlike CompareAndSwapB, $res receives the value the
// memory location held before the operation, not a success boolean.
// Note: format string fixed — this is the STRONG form, not "weak"
// (the weak forms are the WeakCompareAndSwap* instructs below).
instruct compareAndExchangeB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                            /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                            /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5616
// Strong compare-and-exchange on a byte field, direct form (Zabha && Zacas).
// $res receives the previous memory value (no result-as-bool conversion).
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5636
// Strong compare-and-exchange on a short field, LR/SC-based narrow
// emulation (no Zabha/Zacas).  $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                            /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                            /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5660
// Strong compare-and-exchange on a short field, direct form (Zabha && Zacas).
// $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5680
// Strong compare-and-exchange on an int field.  $res receives the value
// previously held by the memory location (no result-as-bool conversion).
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI"
  %}

  ins_encode %{
    // Relaxed acquire; rl release bit always set (see "Sundry CAS" note).
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5698
// Strong compare-and-exchange on a long field.  $res receives the value
// previously held by the memory location.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5716
// Strong compare-and-exchange on a narrow oop field; barrier data must be 0.
// $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN"
  %}

  ins_encode %{
    // Narrow oops are 32-bit zero-extended values, hence uint32.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5736
// Strong compare-and-exchange on a pointer field; barrier data must be 0.
// $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5756
// Acquiring strong compare-and-exchange on a byte field, LR/SC-based
// narrow emulation (no Zabha/Zacas).  $res receives the previous value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                            /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                            /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5780
// Acquiring strong compare-and-exchange on a byte field, direct form
// (Zabha && Zacas).  $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5800
// Acquiring strong compare-and-exchange on a short field, LR/SC-based
// narrow emulation (no Zabha/Zacas).  $res receives the previous value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq_narrow"
  %}

  ins_encode %{
    __ cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                            /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                            /*result_as_bool*/ false, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5824
// Acquiring strong compare-and-exchange on a short field, direct form
// (Zabha && Zacas).  $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5844
// Acquiring strong compare-and-exchange on an int field (aq+rl ordering).
// $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5864
// Acquiring strong compare-and-exchange on a long field (aq+rl ordering).
// $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5884
// Acquiring strong compare-and-exchange on a narrow oop field; barrier
// data must be 0.  $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq"
  %}

  ins_encode %{
    // Narrow oops are 32-bit zero-extended values, hence uint32.
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5904
// Acquiring strong compare-and-exchange on a pointer field; barrier data
// must be 0.  $res receives the previous memory value.
// Format string fixed: this is the strong form, not "weak".
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq"
  %}

  ins_encode %{
    __ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
               /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5924
// Weak CAS on a byte field, LR/SC-based narrow emulation (no Zabha/Zacas).
// Weak means the operation may fail spuriously; $res is 1 on success.
instruct weakCompareAndSwapB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapB_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                                 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5949
// Weak CAS on a byte field, direct form (Zabha && Zacas).  $res is 1 on
// success; a weak CAS may fail spuriously.
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapB"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5970
// Weak CAS on a short field, LR/SC-based narrow emulation (no Zabha/Zacas).
// $res is 1 on success; may fail spuriously.
instruct weakCompareAndSwapS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                    iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate(!UseZabha || !UseZacas);

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapS_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                                 /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
5995
// Weak CAS on a short field, direct form (Zabha && Zacas).  $res is 1 on
// success; may fail spuriously.
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(UseZabha && UseZacas);

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapS"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6016
// Weak CAS on an int field.  $res is 1 on success; may fail spuriously.
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapI"
  %}

  ins_encode %{
    // Relaxed acquire; rl release bit always set (see "Sundry CAS" note).
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6035
// Weak CAS on a long field.  $res is 1 on success; may fail spuriously.
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapL"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6054
// Weak CAS on a narrow oop field; barrier data must be 0.  $res is 1 on
// success; may fail spuriously.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapN"
  %}

  ins_encode %{
    // Narrow oops are 32-bit zero-extended values, hence uint32.
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6075
// Weak CAS on a pointer field; barrier data must be 0.  $res is 1 on
// success; may fail spuriously.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapP"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6096
// Acquiring weak CAS on a byte field, LR/SC-based narrow emulation
// (no Zabha/Zacas).  $res is 1 on success; may fail spuriously.
instruct weakCompareAndSwapBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapBAcq_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                                 /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6121
// Acquiring weak CAS on a byte field, direct form (Zabha && Zacas).
// $res is 1 on success; may fail spuriously.
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapBAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6142
// Acquiring weak CAS on a short field, LR/SC-based narrow emulation
// (no Zabha/Zacas).  $res is 1 on success; may fail spuriously.
instruct weakCompareAndSwapSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
                                       iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
  predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapSAcq_narrow"
  %}

  ins_encode %{
    __ weak_cmpxchg_narrow_value(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                                 /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
                                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6167
// Acquiring weak CAS on a short field, direct form (Zabha && Zacas).
// $res is 1 on success; may fail spuriously.
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapSAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6188
// Acquiring weak CAS on an int field (aq+rl ordering bits).  $res is 1 on
// success; may fail spuriously.
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapIAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6209
// Acquiring weak CAS on a long field (aq+rl ordering bits).  $res is 1 on
// success; may fail spuriously.
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapLAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6230
// Acquiring weak CAS on a narrow oop field; barrier data must be 0.
// $res is 1 on success; may fail spuriously.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapNAcq"
  %}

  ins_encode %{
    // Narrow oops are 32-bit zero-extended values, hence uint32.
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6251
// Weak CAS of a pointer with acquire semantics. Only matched when no GC
// barrier data is attached to the node (barrier_data == 0).
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));

  ins_cost(2 * VOLATILE_REF_COST);

  format %{
    "weak_cmpxchg_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
    "# $res == 1 when success, #@weakCompareAndSwapPAcq"
  %}

  ins_encode %{
    __ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
                    /*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
  %}

  ins_pipe(pipe_slow);
%}
6272
// Atomic exchange (GetAndSet) without acquire semantics. The previous value
// at [$mem] is returned in $prev and $newv is stored in its place.

instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev)
%{
  match(Set prev (GetAndSetI mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgw $prev, $newv, [$mem]\t#@get_and_setI" %}

  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev)
%{
  match(Set prev (GetAndSetL mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg $prev, $newv, [$mem]\t#@get_and_setL" %}

  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Narrow oop exchange; only matched when no GC barrier data is attached.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);

  match(Set prev (GetAndSetN mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgwu $prev, $newv, [$mem]\t#@get_and_setN" %}

  ins_encode %{
    __ atomic_xchgwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Pointer exchange; only matched when no GC barrier data is attached.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev)
%{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg $prev, $newv, [$mem]\t#@get_and_setP" %}

  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6335
// Atomic exchange with acquire semantics: selected when the node needs an
// acquiring load-reserved; encodes via the *al ("acquire+release") xchg forms.

instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set prev (GetAndSetI mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]\t#@get_and_setIAcq" %}

  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set prev (GetAndSetL mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg_acq $prev, $newv, [$mem]\t#@get_and_setLAcq" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Narrow oop exchange; only matched when no GC barrier data is attached.
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == 0);

  match(Set prev (GetAndSetN mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchgwu_acq $prev, $newv, [$mem]\t#@get_and_setNAcq" %}

  ins_encode %{
    __ atomic_xchgalwu($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

// Pointer exchange; only matched when no GC barrier data is attached.
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev)
%{
  predicate(needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == 0));

  match(Set prev (GetAndSetP mem newv));

  ins_cost(ALU_COST);

  format %{ "atomic_xchg_acq $prev, $newv, [$mem]\t#@get_and_setPAcq" %}

  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6403
// Atomic fetch-and-add, long. The "_no_res" variants are selected when the
// old value is unused (result_not_used()) and discard it via noreg; the "i"
// variants fold an add-range immediate (immLAdd) instead of a register.

instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr)
%{
  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addL" %}

  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL [$mem], $incr\t#@get_and_addL_no_res" %}

  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAdd incr)
%{
  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL $newval, [$mem], $incr\t#@get_and_addLi" %}

  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL [$mem], $incr\t#@get_and_addLi_no_res" %}

  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6467
// Atomic fetch-and-add, int (32-bit atomic_addw). Variant structure mirrors
// the long forms above: "_no_res" discards the old value, "i" folds an
// add-range immediate (immIAdd).

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr)
%{
  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addI" %}

  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI [$mem], $incr\t#@get_and_addI_no_res" %}

  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAdd incr)
%{
  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI $newval, [$mem], $incr\t#@get_and_addIi" %}

  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used());

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI [$mem], $incr\t#@get_and_addIi_no_res" %}

  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6531
// Atomic fetch-and-add, long, with acquire semantics (atomic_addal forms);
// selected when the node needs an acquiring load-reserved.

instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLAcq" %}

  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addL_no_resAcq" %}

  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAdd incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq $newval, [$mem], $incr\t#@get_and_addLiAcq" %}

  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddL mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addL_acq [$mem], $incr\t#@get_and_addLi_no_resAcq" %}

  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6598
// Atomic fetch-and-add, int, with acquire semantics (atomic_addalw forms);
// selected when the node needs an acquiring load-reserved.

instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIAcq" %}

  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addI_no_resAcq" %}

  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAdd incr)
%{
  predicate(needs_acquiring_load_reserved(n));

  match(Set newval (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq $newval, [$mem], $incr\t#@get_and_addIiAcq" %}

  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAdd incr)
%{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_reserved(n));

  match(Set dummy (GetAndAddI mem incr));

  ins_cost(ALU_COST);

  format %{ "get_and_addI_acq [$mem], $incr\t#@get_and_addIi_no_resAcq" %}

  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}

  ins_pipe(pipe_serial);
%}
6666
6667 // ============================================================================
6668 // Arithmetic Instructions
6669 //
6670
6671 // Integer Addition
6672
6673 // TODO
6674 // these currently employ operations which do not set CR and hence are
6675 // not flagged as killing CR but we would like to isolate the cases
6676 // where we want to set flags from those where we don't. need to work
6677 // out how to do that.
// Integer (32-bit) addition: register/register, register/immediate, and a
// fused form that absorbs a ConvL2I on the first operand.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addw  $dst, $src1, $src2\t#@addI_reg_reg" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAdd src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm" %}

  ins_encode %{
    __ addiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}

// addiw only reads the low 32 bits of src1, so the ConvL2I is free.
instruct addI_reg_imm_l2i(iRegINoSp dst, iRegL src1, immIAdd src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, $src2\t#@addI_reg_imm_l2i" %}

  ins_encode %{
    __ addiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6722
6723 // Pointer Addition
// Pointer Addition (64-bit add of a long offset to a pointer base).
instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(ALU_COST);
  format %{ "add $dst, $src1, $src2\t# ptr, #@addP_reg_reg" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// If we shift more than 32 bits, we need not convert I2L.
// (With a shift amount >= 32 the low 32 bits of the input are all that can
// reach the result, so the sign-extension step is redundant.)
instruct lShiftL_regI_immGE32(iRegLNoSp dst, iRegI src, uimmI6_ge32 scale) %{
  match(Set dst (LShiftL (ConvI2L src) scale));
  ins_cost(ALU_COST);
  format %{ "slli $dst, $src, $scale & 63\t#@lShiftL_regI_immGE32" %}

  ins_encode %{
    __ slli(as_Register($dst$$reg), as_Register($src$$reg), $scale$$constant & 63);
  %}

  ins_pipe(ialu_reg_shift);
%}

// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAdd src2) %{
  match(Set dst (AddP src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi $dst, $src1, $src2\t# ptr, #@addP_reg_imm" %}

  ins_encode %{
    __ addi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6768
6769 // Long Addition
// Long (64-bit) register/register addition.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(ALU_COST);
  format %{ "add $dst, $src1, $src2\t#@addL_reg_reg" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
6783
// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi $dst, $src1, $src2\t#@addL_reg_imm" %}

  ins_encode %{
    // src2 is an immLAdd immediate, so a single addi suffices
    __ addi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6799
6800 // Integer Subtraction
// Integer (32-bit) subtraction.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(ALU_COST);
  format %{ "subw  $dst, $src1, $src2\t#@subI_reg_reg" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immISub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(ALU_COST);
  format %{ "addiw  $dst, $src1, -$src2\t#@subI_reg_imm" %}

  ins_encode %{
    // subiw emits addiw with the negated immediate (see format above)
    __ subiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6832
6833 // Long Subtraction
// Long (64-bit) register/register subtraction.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(ALU_COST);
  format %{ "sub  $dst, $src1, $src2\t#@subL_reg_reg" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
6847
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLSub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(ALU_COST);
  format %{ "addi  $dst, $src1, -$src2\t#@subL_reg_imm" %}

  ins_encode %{
    // subi emits addi with the negated immediate (see format above)
    __ subi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant);
  %}

  ins_pipe(ialu_reg_imm);
%}
6863
6864 // Integer Negation (special case for sub)
6865
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (SubI zero src));
  ins_cost(ALU_COST);
  format %{ "subw  $dst, x0, $src\t# int, #@negI_reg" %}

  ins_encode %{
    // negw is a pseudo-instruction for subw dst, x0, src
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// Long Negation

instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (SubL zero src));
  ins_cost(ALU_COST);
  format %{ "sub  $dst, x0, $src\t# long, #@negL_reg" %}

  ins_encode %{
    // neg is a pseudo-instruction for sub dst, x0, src
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
6895
6896 // Integer Multiply
6897
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulw  $dst, $src1, $src2\t#@mulI" %}

  ins_encode %{
    // mulw multiplies the low 32 bits of the sources and sign-extends
    // the 32-bit product into the upper half of dst
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// Long Multiply

instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mul  $dst, $src1, $src2\t#@mulL" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// Signed high 64 bits of a 64x64->128-bit multiply.
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
%{
  match(Set dst (MulHiL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulh  $dst, $src1, $src2\t# mulhi, #@mulHiL_rReg" %}

  ins_encode %{
    __ mulh(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// Unsigned high 64 bits of a 64x64->128-bit multiply.
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2)
%{
  match(Set dst (UMulHiL src1 src2));
  ins_cost(IMUL_COST);
  format %{ "mulhu  $dst, $src1, $src2\t# umulhi, #@umulHiL_rReg" %}

  ins_encode %{
    __ mulhu(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
6959
6960 // Integer Divide
6961
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "divw  $dst, $src1, $src2\t#@divI"%}

  ins_encode %{
    __ divw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(idiv_reg_reg);
%}

instruct UdivI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "divuw  $dst, $src1, $src2\t#@UdivI"%}

  ins_encode %{
    __ divuw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(idiv_reg_reg);
%}

// (x >> 31) >>> 31 just extracts the sign bit, so a single srliw by 31
// produces the same result.
instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{
  match(Set dst (URShiftI (RShiftI src1 div1) div2));
  ins_cost(ALU_COST);
  format %{ "srliw $dst, $src1, $div1\t# int signExtract, #@signExtract" %}

  ins_encode %{
    __ srliw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
6994
6995 // Long Divide
6996
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "div  $dst, $src1, $src2\t#@divL" %}

  ins_encode %{
    __ div(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ldiv_reg_reg);
%}

instruct UdivL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));
  ins_cost(IDIVDI_COST);

  format %{ "divu  $dst, $src1, $src2\t#@UdivL" %}

  ins_encode %{
    __ divu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ldiv_reg_reg);
%}

// (x >> 63) >>> 63 just extracts the sign bit, so a single srli by 63
// produces the same result.
instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
  match(Set dst (URShiftL (RShiftL src1 div1) div2));
  ins_cost(ALU_COST);
  format %{ "srli $dst, $src1, $div1\t# long signExtract, #@signExtractL" %}

  ins_encode %{
    __ srli(as_Register($dst$$reg), as_Register($src1$$reg), 63);
  %}
  ins_pipe(ialu_reg_shift);
%}
7030
7031 // Integer Remainder
7032
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "remw  $dst, $src1, $src2\t#@modI" %}

  ins_encode %{
    __ remw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct UmodI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));
  ins_cost(IDIVSI_COST);
  format %{ "remuw  $dst, $src1, $src2\t#@UmodI" %}

  ins_encode %{
    __ remuw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

// Long Remainder

instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "rem  $dst, $src1, $src2\t#@modL" %}

  ins_encode %{
    __ rem(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}

instruct UmodL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));
  ins_cost(IDIVDI_COST);
  format %{ "remu  $dst, $src1, $src2\t#@UmodL" %}

  ins_encode %{
    __ remu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}
  ins_pipe(ialu_reg_reg);
%}
7078
7079 // Integer Shifts
7080
7081 // Shift Left Register
7082 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sllw  $dst, $src1, $src2\t#@lShiftI_reg_reg" %}

  ins_encode %{
    __ sllw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "slliw  $dst, $src1, ($src2 & 0x1f)\t#@lShiftI_reg_imm" %}

  ins_encode %{
    // slliw only encodes a 5-bit shift amount;
    // mask the immediate to match
    __ slliw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7113
7114 // Shift Right Logical Register
7115 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "srlw  $dst, $src1, $src2\t#@urShiftI_reg_reg" %}

  ins_encode %{
    __ srlw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "srliw  $dst, $src1, ($src2 & 0x1f)\t#@urShiftI_reg_imm" %}

  ins_encode %{
    // srliw only encodes a 5-bit shift amount;
    // mask the immediate to match
    __ srliw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7146
7147 // Shift Right Arithmetic Register
7148 // Only the low 5 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sraw  $dst, $src1, $src2\t#@rShiftI_reg_reg" %}

  ins_encode %{
    // sraw sign-extends the 32-bit result into the upper half of dst
    __ sraw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));
  ins_cost(ALU_COST);
  format %{ "sraiw  $dst, $src1, ($src2 & 0x1f)\t#@rShiftI_reg_imm" %}

  ins_encode %{
    // sraiw only encodes a 5-bit shift amount; mask the immediate to match.
    // The 32-bit result is sign-extended into the upper half of dst.
    __ sraiw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             (unsigned) $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7179
7180 // Long Shifts
7181
7182 // Shift Left Register
7183 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "sll  $dst, $src1, $src2\t#@lShiftL_reg_reg" %}

  ins_encode %{
    __ sll(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "slli  $dst, $src1, ($src2 & 0x3f)\t#@lShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ slli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7216
7217 // Shift Right Logical Register
7218 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srl  $dst, $src1, $src2\t#@urShiftL_reg_reg" %}

  ins_encode %{
    __ srl(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srli  $dst, $src1, ($src2 & 0x3f)\t#@urShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7251
7252 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long (CastP2X);
// special-cased so card table address computations stay a single srli.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(ALU_COST);
  format %{ "srli  $dst, p2x($src1), ($src2 & 0x3f)\t#@urShiftP_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srli(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7269
7270 // Shift Right Arithmetic Register
7271 // Only the low 6 bits of src2 are considered for the shift amount, all other bits are ignored.
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "sra  $dst, $src1, $src2\t#@rShiftL_reg_reg" %}

  ins_encode %{
    __ sra(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}

// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(ALU_COST);
  format %{ "srai  $dst, $src1, ($src2 & 0x3f)\t#@rShiftL_reg_imm" %}

  ins_encode %{
    // the shift amount is encoded in the lower
    // 6 bits of the I-immediate field for RV64I
    __ srai(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (unsigned) $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
7304
// Bitwise NOT, matched as XOR with -1 (all-ones).

instruct regI_not_reg(iRegINoSp dst, iRegI src1, immI_M1 m1) %{
  match(Set dst (XorI src1 m1));
  ins_cost(ALU_COST);
  format %{ "xori  $dst, $src1, -1\t#@regI_not_reg" %}

  ins_encode %{
    __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
  %}

  ins_pipe(ialu_reg_imm);
%}

instruct regL_not_reg(iRegLNoSp dst, iRegL src1, immL_M1 m1) %{
  match(Set dst (XorL src1 m1));
  ins_cost(ALU_COST);
  format %{ "xori  $dst, $src1, -1\t#@regL_not_reg" %}

  ins_encode %{
    __ xori(as_Register($dst$$reg), as_Register($src1$$reg), -1);
  %}

  ins_pipe(ialu_reg_imm);
%}
7328
7329
7330 // ============================================================================
7331 // Floating Point Arithmetic Instructions
7332
// Single- and double-precision add, subtract and multiply; each maps
// directly onto the corresponding F/D-extension instruction.

instruct addF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(DEFAULT_COST * 5);
  format %{ "fadd.s  $dst, $src1, $src2\t#@addF_reg_reg" %}

  ins_encode %{
    __ fadd_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct addD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(DEFAULT_COST * 5);
  format %{ "fadd.d  $dst, $src1, $src2\t#@addD_reg_reg" %}

  ins_encode %{
    __ fadd_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

instruct subF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(DEFAULT_COST * 5);
  format %{ "fsub.s  $dst, $src1, $src2\t#@subF_reg_reg" %}

  ins_encode %{
    __ fsub_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct subD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(DEFAULT_COST * 5);
  format %{ "fsub.d  $dst, $src1, $src2\t#@subD_reg_reg" %}

  ins_encode %{
    __ fsub_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}

instruct mulF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fmul.s  $dst, $src1, $src2\t#@mulF_reg_reg" %}

  ins_encode %{
    __ fmul_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}

instruct mulD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fmul.d  $dst, $src1, $src2\t#@mulD_reg_reg" %}

  ins_encode %{
    __ fmul_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
7422
// Fused multiply-add family. These match the C2 FmaF/FmaD ideal nodes
// (generated only when UseFMA is on, hence the asserts) onto the RISC-V
// R4-type fused instructions, which round once.

// src1 * src2 + src3
instruct maddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fmadd.s $dst, $src1, $src2, $src3\t#@maddF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadd_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fmadd.d $dst, $src1, $src2, $src3\t#@maddD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadd_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
// (the negated addend is folded into the match rule via NegF)
instruct msubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fmsub.s $dst, $src1, $src2, $src3\t#@msubF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsub_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
instruct msubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fmsub.d $dst, $src1, $src2, $src3\t#@msubD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsub_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct nmsubF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fnmsub.s $dst, $src1, $src2, $src3\t#@nmsubF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsub_s(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct nmsubD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fnmsub.d $dst, $src1, $src2, $src3\t#@nmsubD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsub_d(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct nmaddF_reg_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  ins_cost(FMUL_SINGLE_COST);
  format %{ "fnmadd.s $dst, $src1, $src2, $src3\t#@nmaddF_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadd_s(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct nmaddD_reg_reg(fRegD dst, fRegD src1, fRegD src2, fRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  ins_cost(FMUL_DOUBLE_COST);
  format %{ "fnmadd.d $dst, $src1, $src2, $src3\t#@nmaddD_reg_reg" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadd_d(as_FloatRegister($dst$$reg),
                as_FloatRegister($src1$$reg),
                as_FloatRegister($src2$$reg),
                as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
7570
// Math.max / Math.min intrinsics. Two variants each: without the Zfa
// extension a macro-assembler helper (minmax_fp) is used and clobbers
// the flags register; with Zfa the fmaxm/fminm instructions are emitted
// directly and no flag kill is needed.

// Math.max(FF)F
instruct maxF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MaxF src1 src2));
  effect(KILL cr);

  format %{ "maxF $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::single_precision, false /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

// Math.max(FF)F via the Zfa fmaxm.s instruction.
instruct maxF_reg_reg_zfa(fRegF dst, fRegF src1, fRegF src2) %{
  predicate(UseZfa);
  match(Set dst (MaxF src1 src2));

  format %{ "maxF $dst, $src1, $src2" %}

  ins_encode %{
    __ fmaxm_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.min(FF)F
instruct minF_reg_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MinF src1 src2));
  effect(KILL cr);

  format %{ "minF $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::single_precision, true /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

// Math.min(FF)F via the Zfa fminm.s instruction.
instruct minF_reg_reg_zfa(fRegF dst, fRegF src1, fRegF src2) %{
  predicate(UseZfa);
  match(Set dst (MinF src1 src2));

  format %{ "minF $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.max(DD)D
instruct maxD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MaxD src1 src2));
  effect(KILL cr);

  format %{ "maxD $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::double_precision, false /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

// Math.max(DD)D via the Zfa fmaxm.d instruction.
instruct maxD_reg_reg_zfa(fRegD dst, fRegD src1, fRegD src2) %{
  predicate(UseZfa);
  match(Set dst (MaxD src1 src2));

  format %{ "maxD $dst, $src1, $src2" %}

  ins_encode %{
    __ fmaxm_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Math.min(DD)D
instruct minD_reg_reg(fRegD dst, fRegD src1, fRegD src2, rFlagsReg cr) %{
  predicate(!UseZfa);
  match(Set dst (MinD src1 src2));
  effect(KILL cr);

  format %{ "minD $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp(as_FloatRegister($dst$$reg),
                 as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg),
                 __ FLOAT_TYPE::double_precision, true /* is_min */);
  %}

  ins_pipe(pipe_class_default);
%}

// Math.min(DD)D via the Zfa fminm.d instruction.
instruct minD_reg_reg_zfa(fRegD dst, fRegD src1, fRegD src2) %{
  predicate(UseZfa);
  match(Set dst (MinD src1 src2));

  format %{ "minD $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
7694
// Float/Double classification intrinsics. Pattern for all four:
//   fclass   - sets one-hot classification bits for the FP value
//   andi     - keeps only the bits of interest (inf / finite)
//   slt zr,x - collapses "any bit set" to 0/1 (x > 0 since the mask
//              result is a small non-negative value)

// Float.isInfinite
instruct isInfiniteF_reg_reg(iRegINoSp dst, fRegF src)
%{
  match(Set dst (IsInfiniteF src));

  format %{ "isInfinite $dst, $src" %}
  ins_encode %{
    __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::inf);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double.isInfinite
instruct isInfiniteD_reg_reg(iRegINoSp dst, fRegD src)
%{
  match(Set dst (IsInfiniteD src));

  format %{ "isInfinite $dst, $src" %}
  ins_encode %{
    __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::inf);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Float.isFinite
instruct isFiniteF_reg_reg(iRegINoSp dst, fRegF src)
%{
  match(Set dst (IsFiniteF src));

  format %{ "isFinite $dst, $src" %}
  ins_encode %{
    __ fclass_s(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::finite);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Double.isFinite
instruct isFiniteD_reg_reg(iRegINoSp dst, fRegD src)
%{
  match(Set dst (IsFiniteD src));

  format %{ "isFinite $dst, $src" %}
  ins_encode %{
    __ fclass_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
    __ andi(as_Register($dst$$reg), as_Register($dst$$reg), Assembler::FClassBits::finite);
    __ slt(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
7754
// Single-precision divide: fdiv.s.
instruct divF_reg_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(FDIV_COST);
  format %{ "fdiv.s $dst, $src1, $src2\t#@divF_reg_reg" %}

  ins_encode %{
    __ fdiv_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}

// Double-precision divide: fdiv.d.
instruct divD_reg_reg(fRegD dst, fRegD src1, fRegD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(FDIV_COST);
  format %{ "fdiv.d $dst, $src1, $src2\t#@divD_reg_reg" %}

  ins_encode %{
    __ fdiv_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}

// Single-precision negate. fneg_s expands to fsgnjn.s dst, src, src
// (sign-injection), which flips the sign bit without other effects.
instruct negF_reg_reg(fRegF dst, fRegF src) %{
  match(Set dst (NegF src));

  ins_cost(XFER_COST);
  format %{ "fsgnjn.s $dst, $src, $src\t#@negF_reg_reg" %}

  ins_encode %{
    __ fneg_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision negate via fsgnjn.d (see negF_reg_reg).
instruct negD_reg_reg(fRegD dst, fRegD src) %{
  match(Set dst (NegD src));

  ins_cost(XFER_COST);
  format %{ "fsgnjn.d $dst, $src, $src\t#@negD_reg_reg" %}

  ins_encode %{
    __ fneg_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
7812
// Integer absolute value via the classic branch-free sequence:
//   t0  = src >> 31  (all ones if negative, else zero)
//   dst = (src + t0) ^ t0
// Clobbers scratch register t0.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (AbsI src));

  ins_cost(ALU_COST * 3);
  format %{
    "sraiw t0, $src, 0x1f\n\t"
    "addw $dst, $src, t0\n\t"
    "xorr $dst, $dst, t0\t#@absI_reg"
  %}

  ins_encode %{
    __ sraiw(t0, as_Register($src$$reg), 0x1f);
    __ addw(as_Register($dst$$reg), as_Register($src$$reg), t0);
    __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}

// Long absolute value; same branch-free sequence with a 63-bit shift.
instruct absL_reg(iRegLNoSp dst, iRegL src) %{
  match(Set dst (AbsL src));

  ins_cost(ALU_COST * 3);
  format %{
    "srai t0, $src, 0x3f\n\t"
    "add $dst, $src, t0\n\t"
    "xorr $dst, $dst, t0\t#@absL_reg"
  %}

  ins_encode %{
    __ srai(t0, as_Register($src$$reg), 0x3f);
    __ add(as_Register($dst$$reg), as_Register($src$$reg), t0);
    __ xorr(as_Register($dst$$reg), as_Register($dst$$reg), t0);
  %}

  ins_pipe(pipe_class_default);
%}

// Single-precision abs. fabs_s expands to fsgnjx.s dst, src, src,
// which clears the sign bit via sign-injection.
instruct absF_reg(fRegF dst, fRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(XFER_COST);
  format %{ "fsgnjx.s $dst, $src, $src\t#@absF_reg" %}
  ins_encode %{
    __ fabs_s(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}

// Double-precision abs via fsgnjx.d (see absF_reg).
instruct absD_reg(fRegD dst, fRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(XFER_COST);
  format %{ "fsgnjx.d $dst, $src, $src\t#@absD_reg" %}
  ins_encode %{
    __ fabs_d(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}

// Single-precision square root: fsqrt.s.
instruct sqrtF_reg(fRegF dst, fRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(FSQRT_COST);
  format %{ "fsqrt.s $dst, $src\t#@sqrtF_reg" %}
  ins_encode %{
    __ fsqrt_s(as_FloatRegister($dst$$reg),
               as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_sqrt_s);
%}

// Double-precision square root: fsqrt.d.
instruct sqrtD_reg(fRegD dst, fRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(FSQRT_COST);
  format %{ "fsqrt.d $dst, $src\t#@sqrtD_reg" %}
  ins_encode %{
    __ fsqrt_d(as_FloatRegister($dst$$reg),
               as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_sqrt_d);
%}
7902
// Round Instruction
// RoundDoubleMode: round a double with an explicit rounding mode constant
// ($rmode). The macro-assembler helper needs three integer temps, and dst
// is TEMP_DEF because it is written before the source is fully consumed.
instruct roundD_reg(fRegD dst, fRegD src, immI rmode, iRegLNoSp tmp1, iRegLNoSp tmp2, iRegLNoSp tmp3) %{
  match(Set dst (RoundDoubleMode src rmode));
  ins_cost(2 * XFER_COST + BRANCH_COST);
  effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "RoundDoubleMode $src, $rmode" %}
  ins_encode %{
    __ round_double_mode(as_FloatRegister($dst$$reg),
                         as_FloatRegister($src$$reg), $rmode$$constant, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
7916
// Copysign and signum intrinsics

// Math.copySign(double): take magnitude from src1, sign from src2,
// via the fsgnj.d sign-injection instruction. The zero operand only
// shapes the ideal graph; it is not used in the encoding.
instruct copySignD_reg(fRegD dst, fRegD src1, fRegD src2, immD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  format %{ "CopySignD $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ fsgnj_d(dst, src1, src2);
  %}
  ins_pipe(fp_dop_reg_reg_d);
%}

// Math.copySign(float) via fsgnj.s.
instruct copySignF_reg(fRegF dst, fRegF src1, fRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  format %{ "CopySignF $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    __ fsgnj_s(dst, src1, src2);
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}

// Math.signum(double): in-place on dst; the macro-assembler helper takes
// a register preloaded with 1.0 ($one). The zero operand only shapes the
// ideal graph.
instruct signumD_reg(fRegD dst, immD zero, fRegD one) %{
  match(Set dst (SignumD dst (Binary zero one)));
  format %{ "signumD $dst, $dst" %}
  ins_encode %{
    __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), true /* is_double */);
  %}
  ins_pipe(pipe_class_default);
%}

// Math.signum(float); see signumD_reg.
instruct signumF_reg(fRegF dst, immF zero, fRegF one) %{
  match(Set dst (SignumF dst (Binary zero one)));
  format %{ "signumF $dst, $dst" %}
  ins_encode %{
    __ signum_fp(as_FloatRegister($dst$$reg), as_FloatRegister($one$$reg), false /* is_double */);
  %}
  ins_pipe(pipe_class_default);
%}
7960
// Arithmetic Instructions End

// ============================================================================
// Logical Instructions

// Register And
instruct andI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "andr $dst, $src1, $src2\t#@andI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate And
// immIAdd restricts the constant to the range encodable in andi's
// 12-bit I-immediate.
instruct andI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "andi $dst, $src1, $src2\t#@andI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Or
instruct orI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orr $dst, $src1, $src2\t#@orI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Or
instruct orI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "ori $dst, $src1, $src2\t#@orI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ ori(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Xor
instruct xorI_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "xorr $dst, $src1, $src2\t#@xorI_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xorr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Xor
instruct xorI_reg_imm(iRegINoSp dst, iRegI src1, immIAdd src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "xori $dst, $src1, $src2\t#@xorI_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xori(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
8061
// Long variants of the logical ops. RV64 and/or/xor operate on the full
// 64-bit register, so the encodings are identical to the int versions;
// only the matched ideal nodes (AndL/OrL/XorL) differ.

// Register And Long
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (AndL src1 src2));

  format %{ "andr $dst, $src1, $src2\t#@andL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate And Long
// immLAdd restricts the constant to the 12-bit I-immediate range.
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (AndL src1 src2));

  format %{ "andi $dst, $src1, $src2\t#@andL_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ andi(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Or Long
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "orr $dst, $src1, $src2\t#@orL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Or Long
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (OrL src1 src2));

  format %{ "ori $dst, $src1, $src2\t#@orL_reg_imm" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ ori(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}

// Register Xor Long
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  format %{ "xorr $dst, $src1, $src2\t#@xorL_reg_reg" %}

  ins_cost(ALU_COST);
  ins_encode %{
    __ xorr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Xor Long
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLAdd src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(ALU_COST);
  format %{ "xori $dst, $src1, $src2\t#@xorL_reg_imm" %}

  ins_encode %{
    __ xori(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (int32_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
8157
8158 // ============================================================================
8159 // MemBar Instruction
8160
8161 // RVTSO
8162
8163 instruct unnecessary_membar_rvtso() %{
8164 predicate(UseZtso);
8165 match(LoadFence);
8166 match(StoreFence);
8167 match(StoreStoreFence);
8168 match(MemBarAcquire);
8169 match(MemBarRelease);
8170 match(MemBarStoreStore);
8171 match(MemBarAcquireLock);
8172 match(MemBarReleaseLock);
8173
8174 ins_cost(0);
8175
8176 size(0);
8177
8178 format %{ "#@unnecessary_membar_rvtso elided/tso (empty encoding)" %}
8179 ins_encode %{
8180 __ block_comment("unnecessary_membar_rvtso");
8181 %}
8182 ins_pipe(real_empty);
8183 %}
8184
8185 instruct membar_volatile_rvtso() %{
8186 predicate(UseZtso);
8187 match(MemBarVolatile);
8188 ins_cost(VOLATILE_REF_COST);
8189
8190 format %{ "#@membar_volatile_rvtso\n\t"
8191 "fence w, r"%}
8192
8193 ins_encode %{
8194 __ block_comment("membar_volatile_rvtso");
8195 __ membar(MacroAssembler::StoreLoad);
8196 %}
8197
8198 ins_pipe(pipe_slow);
8199 %}
8200
8201 instruct unnecessary_membar_volatile_rvtso() %{
8202 predicate(UseZtso && Matcher::post_store_load_barrier(n));
8203 match(MemBarVolatile);
8204 ins_cost(0);
8205
8206 size(0);
8207
8208 format %{ "#@unnecessary_membar_volatile_rvtso (unnecessary so empty encoding)" %}
8209 ins_encode %{
8210 __ block_comment("unnecessary_membar_volatile_rvtso");
8211 %}
8212 ins_pipe(real_empty);
8213 %}
8214
// RVWMO

// Acquire barrier for the RVWMO (weak) memory model: orders earlier loads
// before later loads and stores ("fence r, rw"). Matches both LoadFence
// and MemBarAcquire when Ztso is unavailable.
// Fix: "aqcuire" was a misspelling of "acquire" in the instruct name,
// format string and block comment; corrected consistently.
instruct membar_acquire_rvwmo() %{
  predicate(!UseZtso);
  match(LoadFence);
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_acquire_rvwmo\n\t"
            "fence r, rw" %}

  ins_encode %{
    __ block_comment("membar_acquire_rvwmo");
    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
8232
// Release barrier for RVWMO: orders earlier loads and stores before
// later stores ("fence rw, w").
instruct membar_release_rvwmo() %{
  predicate(!UseZtso);
  match(StoreFence);
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_release_rvwmo\n\t"
            "fence rw, w" %}

  ins_encode %{
    __ block_comment("membar_release_rvwmo");
    __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Store-store barrier for RVWMO ("fence w, w").
instruct membar_storestore_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_storestore_rvwmo\n\t"
            "fence w, w" %}

  ins_encode %{
    __ membar(MacroAssembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}

// Full volatile barrier for RVWMO: the expensive StoreLoad ordering.
instruct membar_volatile_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST);

  format %{ "#@membar_volatile_rvwmo\n\t"
            "fence w, r"%}

  ins_encode %{
    __ block_comment("membar_volatile_rvwmo");
    __ membar(MacroAssembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}

// Lock acquire/release barriers are elided: the atomic instructions used
// for locking provide the needed ordering, so only a comment is emitted.
instruct membar_lock_rvwmo() %{
  predicate(!UseZtso);
  match(MemBarAcquireLock);
  match(MemBarReleaseLock);
  ins_cost(0);

  format %{ "#@membar_lock_rvwmo (elided)" %}

  ins_encode %{
    __ block_comment("membar_lock_rvwmo (elided)");
  %}

  ins_pipe(pipe_serial);
%}

// Elide the volatile StoreLoad fence when the matcher proves a following
// barrier already covers it (post_store_load_barrier).
instruct unnecessary_membar_volatile_rvwmo() %{
  predicate(!UseZtso && Matcher::post_store_load_barrier(n));
  match(MemBarVolatile);
  ins_cost(0);

  size(0);
  format %{ "#@unnecessary_membar_volatile_rvwmo (unnecessary so empty encoding)" %}
  ins_encode %{
    __ block_comment("unnecessary_membar_volatile_rvwmo");
  %}
  ins_pipe(real_empty);
%}

// Thread.onSpinWait intrinsic: emit the Zihintpause 'pause' hint.
instruct spin_wait() %{
  predicate(UseZihintpause);
  match(OnSpinWait);
  ins_cost(CACHE_MISS_COST);

  format %{ "spin_wait" %}

  ins_encode %{
    __ pause();
  %}

  ins_pipe(pipe_serial);
%}
8321
8322 // ============================================================================
8323 // Cast Instructions (Java-level type cast)
8324
8325 instruct castX2P(iRegPNoSp dst, iRegL src) %{
8326 match(Set dst (CastX2P src));
8327
8328 ins_cost(ALU_COST);
8329 format %{ "mv $dst, $src\t# long -> ptr, #@castX2P" %}
8330
8331 ins_encode %{
8332 if ($dst$$reg != $src$$reg) {
8333 __ mv(as_Register($dst$$reg), as_Register($src$$reg));
8334 }
8335 %}
8336
8337 ins_pipe(ialu_reg);
8338 %}
8339
8340 instruct castP2X(iRegLNoSp dst, iRegP src) %{
8341 match(Set dst (CastP2X src));
8342
8343 ins_cost(ALU_COST);
8344 format %{ "mv $dst, $src\t# ptr -> long, #@castP2X" %}
8345
8346 ins_encode %{
8347 if ($dst$$reg != $src$$reg) {
8348 __ mv(as_Register($dst$$reg), as_Register($src$$reg));
8349 }
8350 %}
8351
8352 ins_pipe(ialu_reg);
8353 %}
8354
8355 instruct castPP(iRegPNoSp dst)
8356 %{
8357 match(Set dst (CastPP dst));
8358 ins_cost(0);
8359
8360 size(0);
8361 format %{ "# castPP of $dst, #@castPP" %}
8362 ins_encode(/* empty encoding */);
8363 ins_pipe(pipe_class_empty);
8364 %}
8365
8366 instruct castLL(iRegL dst)
8367 %{
8368 match(Set dst (CastLL dst));
8369
8370 size(0);
8371 format %{ "# castLL of $dst, #@castLL" %}
8372 ins_encode(/* empty encoding */);
8373 ins_cost(0);
8374 ins_pipe(pipe_class_empty);
8375 %}
8376
8377 instruct castII(iRegI dst)
8378 %{
8379 match(Set dst (CastII dst));
8380
8381 size(0);
8382 format %{ "# castII of $dst, #@castII" %}
8383 ins_encode(/* empty encoding */);
8384 ins_cost(0);
8385 ins_pipe(pipe_class_empty);
8386 %}
8387
8388 instruct checkCastPP(iRegPNoSp dst)
8389 %{
8390 match(Set dst (CheckCastPP dst));
8391
8392 size(0);
8393 ins_cost(0);
8394 format %{ "# checkcastPP of $dst, #@checkCastPP" %}
8395 ins_encode(/* empty encoding */);
8396 ins_pipe(pipe_class_empty);
8397 %}
8398
8399 instruct castHH(fRegF dst)
8400 %{
8401 match(Set dst (CastHH dst));
8402
8403 size(0);
8404 format %{ "# castHH of $dst" %}
8405 ins_encode(/* empty encoding */);
8406 ins_cost(0);
8407 ins_pipe(pipe_class_empty);
8408 %}
8409
8410 instruct castFF(fRegF dst)
8411 %{
8412 match(Set dst (CastFF dst));
8413
8414 size(0);
8415 format %{ "# castFF of $dst" %}
8416 ins_encode(/* empty encoding */);
8417 ins_cost(0);
8418 ins_pipe(pipe_class_empty);
8419 %}
8420
8421 instruct castDD(fRegD dst)
8422 %{
8423 match(Set dst (CastDD dst));
8424
8425 size(0);
8426 format %{ "# castDD of $dst" %}
8427 ins_encode(/* empty encoding */);
8428 ins_cost(0);
8429 ins_pipe(pipe_class_empty);
8430 %}
8431
8432 instruct castVV(vReg dst)
8433 %{
8434 match(Set dst (CastVV dst));
8435
8436 size(0);
8437 format %{ "# castVV of $dst" %}
8438 ins_encode(/* empty encoding */);
8439 ins_cost(0);
8440 ins_pipe(pipe_class_empty);
8441 %}
8442
8443 instruct castVVMask(vRegMask dst)
8444 %{
8445 match(Set dst (CastVV dst));
8446
8447 size(0);
8448 format %{ "# castVV of $dst" %}
8449 ins_encode(/* empty encoding */);
8450 ins_cost(0);
8451 ins_pipe(pipe_class_empty);
8452 %}
8453
8454 // ============================================================================
8455 // Convert Instructions
8456
8457 // int to bool
8458 instruct convI2Bool(iRegINoSp dst, iRegI src)
8459 %{
8460 match(Set dst (Conv2B src));
8461
8462 ins_cost(ALU_COST);
8463 format %{ "snez $dst, $src\t#@convI2Bool" %}
8464
8465 ins_encode %{
8466 __ snez(as_Register($dst$$reg), as_Register($src$$reg));
8467 %}
8468
8469 ins_pipe(ialu_reg);
8470 %}
8471
8472 // pointer to bool
8473 instruct convP2Bool(iRegINoSp dst, iRegP src)
8474 %{
8475 match(Set dst (Conv2B src));
8476
8477 ins_cost(ALU_COST);
8478 format %{ "snez $dst, $src\t#@convP2Bool" %}
8479
8480 ins_encode %{
8481 __ snez(as_Register($dst$$reg), as_Register($src$$reg));
8482 %}
8483
8484 ins_pipe(ialu_reg);
8485 %}
8486
8487 // int <-> long
8488
8489 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
8490 %{
8491 match(Set dst (ConvI2L src));
8492
8493 ins_cost(ALU_COST);
8494 format %{ "addw $dst, $src, zr\t#@convI2L_reg_reg" %}
8495 ins_encode %{
8496 __ sext(as_Register($dst$$reg), as_Register($src$$reg), 32);
8497 %}
8498 ins_pipe(ialu_reg);
8499 %}
8500
8501 instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
8502 match(Set dst (ConvL2I src));
8503
8504 ins_cost(ALU_COST);
8505 format %{ "addw $dst, $src, zr\t#@convL2I_reg" %}
8506
8507 ins_encode %{
8508 __ sext(as_Register($dst$$reg), as_Register($src$$reg), 32);
8509 %}
8510
8511 ins_pipe(ialu_reg);
8512 %}
8513
8514 // int to unsigned long (Zero-extend)
8515 instruct convI2UL_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
8516 %{
8517 match(Set dst (AndL (ConvI2L src) mask));
8518
8519 ins_cost(ALU_COST * 2);
8520 format %{ "zext $dst, $src, 32\t# i2ul, #@convI2UL_reg_reg" %}
8521
8522 ins_encode %{
8523 __ zext(as_Register($dst$$reg), as_Register($src$$reg), 32);
8524 %}
8525
8526 ins_pipe(ialu_reg_shift);
8527 %}
8528
// float <-> double

// Widen float to double with fcvt.d.s.
instruct convF2D_reg(fRegD dst, fRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.s $dst, $src\t#@convF2D_reg" %}

  ins_encode %{
    __ fcvt_d_s(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}

// Narrow double to float with fcvt.s.d.
instruct convD2F_reg(fRegF dst, fRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.d $dst, $src\t#@convD2F_reg" %}

  ins_encode %{
    __ fcvt_s_d(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
8556
// single <-> half precision

// Half-precision (raw 16-bit pattern in an int register) to single precision.
// The heavy lifting (move + convert) is done by the float16_to_float macro;
// tmp is a scratch GPR for the helper.
instruct convHF2F_reg_reg(fRegF dst, iRegINoSp src, iRegINoSp tmp) %{
  match(Set dst (ConvHF2F src));
  effect(TEMP tmp);
  format %{ "fmv.h.x $dst, $src\t# move source from $src to $dst\n\t"
            "fcvt.s.h $dst, $dst\t# convert half to single precision"
  %}
  ins_encode %{
    __ float16_to_float($dst$$FloatRegister, $src$$Register, $tmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// Single precision to half-precision bit pattern in an int register, via the
// float_to_float16 macro. ftmp/xtmp are scratch registers for the helper;
// dst is TEMP_DEF because it is written before all inputs are consumed.
instruct convF2HF_reg_reg(iRegINoSp dst, fRegF src, fRegF ftmp, iRegINoSp xtmp) %{
  match(Set dst (ConvF2HF src));
  effect(TEMP_DEF dst, TEMP ftmp, TEMP xtmp);
  format %{ "fcvt.h.s $ftmp, $src\t# convert single precision to half\n\t"
            "fmv.x.h $dst, $ftmp\t# move result from $ftmp to $dst"
  %}
  ins_encode %{
    __ float_to_float16($dst$$Register, $src$$FloatRegister, $ftmp$$FloatRegister, $xtmp$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8582
// half precision operations

// Bit-move a 16-bit half-precision pattern from a GPR into an FPR (no
// value conversion).
instruct reinterpretS2HF(fRegF dst, iRegI src)
%{
  match(Set dst (ReinterpretS2HF src));
  format %{ "fmv.h.x $dst, $src" %}
  ins_encode %{
    __ fmv_h_x($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(fp_i2f);
%}

// Fused pattern: ConvF2HF immediately reinterpreted as half -> a single
// fcvt.h.s, avoiding the round trip through a GPR.
instruct convF2HFAndS2HF(fRegF dst, fRegF src)
%{
  match(Set dst (ReinterpretS2HF (ConvF2HF src)));
  format %{ "convF2HFAndS2HF $dst, $src" %}
  ins_encode %{
    __ fcvt_h_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}

// Bit-move a half-precision pattern from an FPR into a GPR (no value
// conversion).
instruct reinterpretHF2S(iRegINoSp dst, fRegF src)
%{
  match(Set dst (ReinterpretHF2S src));
  format %{ "fmv.x.h $dst, $src" %}
  ins_encode %{
    __ fmv_x_h($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe(fp_f2i);
%}

// Fused pattern: ReinterpretHF2S immediately converted to float -> a single
// fcvt.s.h, avoiding the round trip through a GPR.
instruct convHF2SAndHF2F(fRegF dst, fRegF src)
%{
  match(Set dst (ConvHF2F (ReinterpretHF2S src)));
  format %{ "convHF2SAndHF2F $dst, $src" %}
  ins_encode %{
    __ fcvt_s_h($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_uop_s);
%}

// Half-precision square root.
instruct sqrt_HF_reg(fRegF dst, fRegF src)
%{
  match(Set dst (SqrtHF src));
  format %{ "fsqrt.h $dst, $src" %}
  ins_encode %{
    __ fsqrt_h($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(fp_sqrt_s);
%}
8634
// Half-precision add/sub/mul/div share one rule; the encoder dispatches on
// the node's ideal opcode to pick the matching Zfh instruction.
instruct binOps_HF_reg(fRegF dst, fRegF src1, fRegF src2)
%{
  match(Set dst (AddHF src1 src2));
  match(Set dst (SubHF src1 src2));
  match(Set dst (MulHF src1 src2));
  match(Set dst (DivHF src1 src2));
  format %{ "binop_hf $dst, $src1, $src2" %}
  ins_encode %{
    int opcode = this->ideal_Opcode();
    switch(opcode) {
      case Op_AddHF: __ fadd_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_SubHF: __ fsub_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_MulHF: __ fmul_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      case Op_DivHF: __ fdiv_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister); break;
      default: assert(false, "%s is not supported here", NodeClassNames[opcode]); break;
    }
  %}
  ins_pipe(fp_dop_reg_reg_s);
%}
8654
// Half-precision min without Zfa: the minmax_fp macro performs the comparison
// sequence (hence KILL cr) with Java min semantics delegated to the helper.
instruct min_HF_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr)
%{
  predicate(!UseZfa);
  match(Set dst (MinHF src1 src2));
  effect(KILL cr);

  format %{ "min_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
                 __ FLOAT_TYPE::half_precision, true /* is_min */);
  %}
  ins_pipe(pipe_class_default);
%}

// Half-precision min with the Zfa extension: a single fminm.h, no flag kill.
instruct min_HF_reg_zfa(fRegF dst, fRegF src1, fRegF src2)
%{
  predicate(UseZfa);
  match(Set dst (MinHF src1 src2));

  format %{ "min_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ fminm_h(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}

// Half-precision max without Zfa; mirror of min_HF_reg with is_min = false.
instruct max_HF_reg(fRegF dst, fRegF src1, fRegF src2, rFlagsReg cr)
%{
  predicate(!UseZfa);
  match(Set dst (MaxHF src1 src2));
  effect(KILL cr);

  format %{ "max_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ minmax_fp($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister,
                 __ FLOAT_TYPE::half_precision, false /* is_min */);
  %}
  ins_pipe(pipe_class_default);
%}

// Half-precision max with the Zfa extension: a single fmaxm.h.
instruct max_HF_reg_zfa(fRegF dst, fRegF src1, fRegF src2)
%{
  predicate(UseZfa);
  match(Set dst (MaxHF src1 src2));

  format %{ "max_hf $dst, $src1, $src2" %}

  ins_encode %{
    __ fmaxm_h(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
8714
// Half-precision fused multiply-add: dst = src1 * src2 + src3.
// NOTE(review): the "fma packedH" wording in the format string looks
// copy-pasted from a packed/vector rule; this rule is scalar — confirm
// against the vector AD file before renaming.
instruct fma_HF_reg(fRegF dst, fRegF src1, fRegF src2, fRegF src3)
%{
  match(Set dst (FmaHF src3 (Binary src1 src2)));
  format %{ "fmadd.h $dst, $src1, $src2, $src3\t# $dst = $src1 * $src2 + $src3 fma packedH" %}
  ins_encode %{
    __ fmadd_h($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}
8724
// float <-> int

// float -> int. Uses the fcvt_w_s_safe wrapper rather than a bare fcvt.w.s;
// presumably the wrapper supplies Java semantics for NaN/out-of-range inputs
// — see the macro assembler for details.
instruct convF2I_reg_reg(iRegINoSp dst, fRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(XFER_COST);
  format %{ "fcvt.w.s $dst, $src\t#@convF2I_reg_reg" %}

  ins_encode %{
    __ fcvt_w_s_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_f2i);
%}

// int -> float via fcvt.s.w.
instruct convI2F_reg_reg(fRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.w $dst, $src\t#@convI2F_reg_reg" %}

  ins_encode %{
    __ fcvt_s_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
8752
// float <-> long

// float -> long via the fcvt_l_s_safe wrapper (Java NaN/range semantics
// presumably handled by the wrapper — see macro assembler).
instruct convF2L_reg_reg(iRegLNoSp dst, fRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(XFER_COST);
  format %{ "fcvt.l.s $dst, $src\t#@convF2L_reg_reg" %}

  ins_encode %{
    __ fcvt_l_s_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_f2l);
%}

// long -> float via fcvt.s.l.
instruct convL2F_reg_reg(fRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(XFER_COST);
  format %{ "fcvt.s.l $dst, $src\t#@convL2F_reg_reg" %}

  ins_encode %{
    __ fcvt_s_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
8780
// double <-> int

// double -> int via the fcvt_w_d_safe wrapper (Java NaN/range semantics
// presumably handled by the wrapper — see macro assembler).
instruct convD2I_reg_reg(iRegINoSp dst, fRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(XFER_COST);
  format %{ "fcvt.w.d $dst, $src\t#@convD2I_reg_reg" %}

  ins_encode %{
    __ fcvt_w_d_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_d2i);
%}

// int -> double via fcvt.d.w.
instruct convI2D_reg_reg(fRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.w $dst, $src\t#@convI2D_reg_reg" %}

  ins_encode %{
    __ fcvt_d_w(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
8808
// double <-> long

// double -> long via the fcvt_l_d_safe wrapper (Java NaN/range semantics
// presumably handled by the wrapper — see macro assembler).
instruct convD2L_reg_reg(iRegLNoSp dst, fRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(XFER_COST);
  format %{ "fcvt.l.d $dst, $src\t#@convD2L_reg_reg" %}

  ins_encode %{
    __ fcvt_l_d_safe($dst$$Register, $src$$FloatRegister);
  %}

  ins_pipe(fp_d2l);
%}

// long -> double via fcvt.d.l.
instruct convL2D_reg_reg(fRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(XFER_COST);
  format %{ "fcvt.d.l $dst, $src\t#@convL2D_reg_reg" %}

  ins_encode %{
    __ fcvt_d_l(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
8836
// Convert oop into int for vectors alignment masking
// Matches (ConvL2I (CastP2X src)): the low 32 bits of the pointer are kept
// via zero-extension.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(ALU_COST * 2);
  format %{ "zext $dst, $src, 32\t# ptr -> int, #@convP2I" %}

  ins_encode %{
    __ zext($dst$$Register, $src$$Register, 32);
  %}

  ins_pipe(ialu_reg);
%}

// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
// With CompressedOops::shift() == 0 the decode is the identity, so a plain
// register move suffices.
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(CompressedOops::shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(ALU_COST);
  format %{ "mv $dst, $src\t# compressed ptr -> int, #@convN2I" %}

  ins_encode %{
    __ mv($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
8867
// Math.round(double) -> long, delegated to the java_round_double macro;
// ftmp is a scratch FP register for the helper.
instruct round_double_reg(iRegLNoSp dst, fRegD src, fRegD ftmp) %{
  match(Set dst (RoundD src));

  ins_cost(XFER_COST + BRANCH_COST);
  effect(TEMP ftmp);
  format %{ "java_round_double $dst, $src\t#@round_double_reg" %}

  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
  %}

  ins_pipe(pipe_slow);
%}

// Math.round(float) -> int, delegated to the java_round_float macro;
// ftmp is a scratch FP register for the helper.
instruct round_float_reg(iRegINoSp dst, fRegF src, fRegF ftmp) %{
  match(Set dst (RoundF src));

  ins_cost(XFER_COST + BRANCH_COST);
  effect(TEMP ftmp);
  format %{ "java_round_float $dst, $src\t#@round_float_reg" %}

  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg), as_FloatRegister($ftmp$$reg));
  %}

  ins_pipe(pipe_slow);
%}
8895
// Convert oop pointer into compressed form
// General case: the oop may be null, so the full encode path is used.
instruct encodeHeapOop(iRegNNoSp dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(ALU_COST);
  format %{ "encode_heap_oop $dst, $src\t#@encodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(pipe_class_default);
%}

// Encode when the type system proves the oop is never null, allowing the
// cheaper not-null encode path.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(ALU_COST);
  format %{ "encode_heap_oop_not_null $dst, $src\t#@encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Decode a compressed oop that may be null (and is not a constant).
instruct decodeHeapOop(iRegPNoSp dst, iRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));

  ins_cost(0);
  format %{ "decode_heap_oop $dst, $src\t#@decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(pipe_class_default);
%}

// Decode when the oop is provably non-null or a constant, allowing the
// cheaper not-null decode path.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));

  ins_cost(0);
  format %{ "decode_heap_oop_not_null $dst, $src\t#@decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(pipe_class_default);
%}
8950
// Convert klass pointer into compressed form.
// Uses t0 as the scratch register for the encode helper.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(ALU_COST);
  format %{ "encode_klass_not_null $dst, $src\t#@encodeKlass_not_null" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg, t0);
  %}

  ins_pipe(pipe_class_default);
%}

// Decompress a narrow klass pointer; an explicit TEMP register is allocated
// for the decode helper instead of using t0.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src, iRegPNoSp tmp) %{
  match(Set dst (DecodeNKlass src));

  effect(TEMP tmp);

  ins_cost(ALU_COST);
  format %{ "decode_klass_not_null $dst, $src\t#@decodeKlass_not_null" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    Register tmp_reg = as_Register($tmp$$reg);
    __ decode_klass_not_null(dst_reg, src_reg, tmp_reg);
  %}

  ins_pipe(pipe_class_default);
%}
8984
// stack <-> reg and reg <-> reg shuffles with no conversion
// These implement the Move[FDIL]2[IFLD] ideal nodes: reinterpret raw bits
// between int/float and long/double domains via the stack, without any
// value conversion.

// Load a float stack slot's raw 32 bits into an int register.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "lw $dst, $src\t#@MoveF2I_stack_reg" %}

  ins_encode %{
    __ lw(as_Register($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load an int stack slot's raw 32 bits into a float register.
instruct MoveI2F_stack_reg(fRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "flw $dst, $src\t#@MoveI2F_stack_reg" %}

  ins_encode %{
    __ flw(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(fp_load_mem_s);

%}

// Load a double stack slot's raw 64 bits into a long register.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "ld $dst, $src\t#@MoveD2L_stack_reg" %}

  ins_encode %{
    __ ld(as_Register($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}

// Load a long stack slot's raw 64 bits into a double register.
instruct MoveL2D_stack_reg(fRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(LOAD_COST);

  format %{ "fld $dst, $src\t#@MoveL2D_stack_reg" %}

  ins_encode %{
    __ fld(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(fp_load_mem_d);

%}

// Store a float register's raw 32 bits into an int stack slot.
instruct MoveF2I_reg_stack(stackSlotI dst, fRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "fsw $src, $dst\t#@MoveF2I_reg_stack" %}

  ins_encode %{
    __ fsw(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(fp_store_reg_s);

%}

// Store an int register's raw 32 bits into a float stack slot.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "sw $src, $dst\t#@MoveI2F_reg_stack" %}

  ins_encode %{
    __ sw(as_Register($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
9094
// Store a double register's raw 64 bits into a long stack slot.
// The format string previously printed "fsd $dst, $src" with the operands
// swapped; it now prints "$src, $dst" to match the emitted store (src is the
// value, dst the stack slot) and the sibling reg->stack rules.
instruct MoveD2L_reg_stack(stackSlotL dst, fRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "fsd $src, $dst\t#@MoveD2L_reg_stack" %}

  ins_encode %{
    __ fsd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(fp_store_reg_d);

%}
9112
// Store a long register's raw 64 bits into a double stack slot.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(STORE_COST);

  format %{ "sd $src, $dst\t#@MoveL2D_reg_stack" %}

  ins_encode %{
    __ sd(as_Register($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}

// Direct register-to-register bit moves (no stack round trip).

// float bits -> int register via fmv.x.w.
instruct MoveF2I_reg_reg(iRegINoSp dst, fRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.x.w $dst, $src\t#@MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmv_x_w(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int bits -> float register via fmv.w.x.
instruct MoveI2F_reg_reg(fRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.w.x $dst, $src\t#@MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmv_w_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);

%}

// double bits -> long register via fmv.x.d.
instruct MoveD2L_reg_reg(iRegLNoSp dst, fRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.x.d $dst, $src\t#@MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmv_x_d(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long bits -> double register via fmv.d.x.
instruct MoveL2D_reg_reg(fRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(FMVX_COST);

  format %{ "fmv.d.x $dst, $src\t#@MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmv_d_x(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);

%}
9202
9203 // ============================================================================
9204 // Compare Instructions which set the result float comparisons in dest register.
9205
9206 instruct cmpF3_reg_reg(iRegINoSp dst, fRegF op1, fRegF op2)
9207 %{
9208 match(Set dst (CmpF3 op1 op2));
9209
9210 ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
9211 format %{ "flt.s $dst, $op2, $op1\t#@cmpF3_reg_reg\n\t"
9212 "bgtz $dst, done\n\t"
9213 "feq.s $dst, $op1, $op2\n\t"
9214 "addi $dst, $dst, -1\n\t"
9215 "done:"
9216 %}
9217
9218 ins_encode %{
9219 // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
9220 __ float_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg),
9221 as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
9222 %}
9223
9224 ins_pipe(pipe_class_default);
9225 %}
9226
9227 instruct cmpD3_reg_reg(iRegINoSp dst, fRegD op1, fRegD op2)
9228 %{
9229 match(Set dst (CmpD3 op1 op2));
9230
9231 ins_cost(XFER_COST * 2 + BRANCH_COST + ALU_COST);
9232 format %{ "flt.d $dst, $op2, $op1\t#@cmpD3_reg_reg\n\t"
9233 "bgtz $dst, done\n\t"
9234 "feq.d $dst, $op1, $op2\n\t"
9235 "addi $dst, $dst, -1\n\t"
9236 "done:"
9237 %}
9238
9239 ins_encode %{
9240 // we want -1 for unordered or less than, 0 for equal and 1 for greater than.
9241 __ double_compare(as_Register($dst$$reg), as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), -1 /*unordered_result < 0*/);
9242 %}
9243
9244 ins_pipe(pipe_class_default);
9245 %}
9246
9247 instruct cmpL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
9248 %{
9249 match(Set dst (CmpL3 op1 op2));
9250
9251 ins_cost(ALU_COST * 3 + BRANCH_COST);
9252 format %{ "slt $dst, $op2, $op1\t#@cmpL3_reg_reg\n\t"
9253 "bnez $dst, done\n\t"
9254 "slt $dst, $op1, $op2\n\t"
9255 "neg $dst, $dst\n\t"
9256 "done:"
9257 %}
9258 ins_encode %{
9259 __ cmp_l2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
9260 __ mv(as_Register($dst$$reg), t0);
9261 %}
9262
9263 ins_pipe(pipe_class_default);
9264 %}
9265
9266 instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL op1, iRegL op2)
9267 %{
9268 match(Set dst (CmpUL3 op1 op2));
9269
9270 ins_cost(ALU_COST * 3 + BRANCH_COST);
9271 format %{ "sltu $dst, $op2, $op1\t#@cmpUL3_reg_reg\n\t"
9272 "bnez $dst, done\n\t"
9273 "sltu $dst, $op1, $op2\n\t"
9274 "neg $dst, $dst\n\t"
9275 "done:"
9276 %}
9277 ins_encode %{
9278 __ cmp_ul2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
9279 __ mv(as_Register($dst$$reg), t0);
9280 %}
9281
9282 ins_pipe(pipe_class_default);
9283 %}
9284
9285 instruct cmpU3_reg_reg(iRegINoSp dst, iRegI op1, iRegI op2)
9286 %{
9287 match(Set dst (CmpU3 op1 op2));
9288
9289 ins_cost(ALU_COST * 3 + BRANCH_COST);
9290 format %{ "sltu $dst, $op2, $op1\t#@cmpU3_reg_reg\n\t"
9291 "bnez $dst, done\n\t"
9292 "sltu $dst, $op1, $op2\n\t"
9293 "neg $dst, $dst\n\t"
9294 "done:"
9295 %}
9296 ins_encode %{
9297 __ cmp_uw2i(t0, as_Register($op1$$reg), as_Register($op2$$reg));
9298 __ mv(as_Register($dst$$reg), t0);
9299 %}
9300
9301 ins_pipe(pipe_class_default);
9302 %}
9303
// CmpLTMask: dst = (p < q) ? -1 : 0, built as slt followed by 0 - dst.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegI p, iRegI q)
%{
  match(Set dst (CmpLTMask p q));

  ins_cost(2 * ALU_COST);

  format %{ "slt $dst, $p, $q\t#@cmpLTMask_reg_reg\n\t"
            "subw $dst, zr, $dst\t#@cmpLTMask_reg_reg"
  %}

  ins_encode %{
    __ slt(as_Register($dst$$reg), as_Register($p$$reg), as_Register($q$$reg));
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
9321
// CmpLTMask against zero: dst = (op < 0) ? -1 : 0, i.e. an arithmetic shift
// of the sign bit across the word. The format string previously printed
// "$dst, $dst" and carried the wrong rule tag (cmpLTMask_reg_reg); it now
// matches the emitted sraiw (source is $op) and this rule's name.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I op, immI0 zero)
%{
  match(Set dst (CmpLTMask op zero));

  ins_cost(ALU_COST);

  format %{ "sraiw $dst, $op, 31\t#@cmpLTMask_reg_zero" %}

  ins_encode %{
    __ sraiw(as_Register($dst$$reg), as_Register($op$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
9336
9337
9338 // ============================================================================
9339 // Max and Min
9340
9341 instruct minI_reg_reg(iRegINoSp dst, iRegI src)
9342 %{
9343 match(Set dst (MinI dst src));
9344
9345 ins_cost(BRANCH_COST + ALU_COST);
9346 format %{"minI_reg_reg $dst, $dst, $src\t#@minI_reg_reg\n\t"%}
9347
9348 ins_encode %{
9349 __ cmov_gt(as_Register($dst$$reg), as_Register($src$$reg),
9350 as_Register($dst$$reg), as_Register($src$$reg));
9351 %}
9352
9353 ins_pipe(pipe_class_compare);
9354 %}
9355
9356 instruct maxI_reg_reg(iRegINoSp dst, iRegI src)
9357 %{
9358 match(Set dst (MaxI dst src));
9359
9360 ins_cost(BRANCH_COST + ALU_COST);
9361 format %{"maxI_reg_reg $dst, $dst, $src\t#@maxI_reg_reg\n\t"%}
9362
9363 ins_encode %{
9364 __ cmov_lt(as_Register($dst$$reg), as_Register($src$$reg),
9365 as_Register($dst$$reg), as_Register($src$$reg));
9366 %}
9367
9368 ins_pipe(pipe_class_compare);
9369 %}
9370
9371 // special case for comparing with zero
9372 // n.b. this is selected in preference to the rule above because it
9373 // avoids loading constant 0 into a source register
9374
9375 instruct minI_reg_zero(iRegINoSp dst, immI0 zero)
9376 %{
9377 match(Set dst (MinI dst zero));
9378 match(Set dst (MinI zero dst));
9379
9380 ins_cost(BRANCH_COST + ALU_COST);
9381 format %{"minI_reg_zero $dst, $dst, zr\t#@minI_reg_zero\n\t"%}
9382
9383 ins_encode %{
9384 __ cmov_gt(as_Register($dst$$reg), zr,
9385 as_Register($dst$$reg), zr);
9386 %}
9387
9388 ins_pipe(pipe_class_compare);
9389 %}
9390
9391 instruct maxI_reg_zero(iRegINoSp dst, immI0 zero)
9392 %{
9393 match(Set dst (MaxI dst zero));
9394 match(Set dst (MaxI zero dst));
9395
9396 ins_cost(BRANCH_COST + ALU_COST);
9397 format %{"maxI_reg_zero $dst, $dst, zr\t#@maxI_reg_zero\n\t"%}
9398
9399 ins_encode %{
9400 __ cmov_lt(as_Register($dst$$reg), zr,
9401 as_Register($dst$$reg), zr);
9402 %}
9403
9404 ins_pipe(pipe_class_compare);
9405 %}
9406
9407 instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
9408 %{
9409 match(Set dst (MinI src1 src2));
9410
9411 effect(DEF dst, USE src1, USE src2);
9412
9413 ins_cost(BRANCH_COST + ALU_COST * 2);
9414 format %{"minI_rReg $dst, $src1, $src2\t#@minI_rReg\n\t"%}
9415
9416 ins_encode %{
9417 __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
9418 __ cmov_gt(as_Register($src1$$reg), as_Register($src2$$reg),
9419 as_Register($dst$$reg), as_Register($src2$$reg));
9420 %}
9421
9422 ins_pipe(pipe_class_compare);
9423 %}
9424
9425 instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
9426 %{
9427 match(Set dst (MaxI src1 src2));
9428
9429 effect(DEF dst, USE src1, USE src2);
9430
9431 ins_cost(BRANCH_COST + ALU_COST * 2);
9432 format %{"maxI_rReg $dst, $src1, $src2\t#@maxI_rReg\n\t"%}
9433
9434 ins_encode %{
9435 __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
9436 __ cmov_lt(as_Register($src1$$reg), as_Register($src2$$reg),
9437 as_Register($dst$$reg), as_Register($src2$$reg));
9438 %}
9439
9440 ins_pipe(pipe_class_compare);
9441 %}
9442
9443 // ============================================================================
9444 // Branch Instructions
9445 // Direct Branch.
9446 instruct branch(label lbl)
9447 %{
9448 match(Goto);
9449
9450 effect(USE lbl);
9451
9452 ins_cost(BRANCH_COST);
9453 format %{ "j $lbl\t#@branch" %}
9454
9455 ins_encode(riscv_enc_j(lbl));
9456
9457 ins_pipe(pipe_branch);
9458 %}
9459
9460 // ============================================================================
9461 // Compare and Branch Instructions
9462
9463 // Patterns for short (< 12KiB) variants
9464
9465 // Compare flags and branch near instructions.
9466 instruct cmpFlag_branch(cmpOpEqNe cmp, rFlagsReg cr, label lbl) %{
9467 match(If cmp cr);
9468 effect(USE lbl);
9469
9470 ins_cost(BRANCH_COST);
9471 format %{ "b$cmp $cr, zr, $lbl\t#@cmpFlag_branch" %}
9472
9473 ins_encode %{
9474 __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label));
9475 %}
9476 ins_pipe(pipe_cmpz_branch);
9477 ins_short_branch(1);
9478 %}
9479
9480 // Compare signed int and branch near instructions
9481 instruct cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
9482 %{
9483 // Same match rule as `far_cmpI_branch'.
9484 match(If cmp (CmpI op1 op2));
9485
9486 effect(USE lbl);
9487
9488 ins_cost(BRANCH_COST);
9489
9490 format %{ "b$cmp $op1, $op2, $lbl\t#@cmpI_branch" %}
9491
9492 ins_encode %{
9493 __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
9494 %}
9495
9496 ins_pipe(pipe_cmp_branch);
9497 ins_short_branch(1);
9498 %}
9499
9500 instruct cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl)
9501 %{
9502 // Same match rule as `far_cmpI_loop'.
9503 match(CountedLoopEnd cmp (CmpI op1 op2));
9504
9505 effect(USE lbl);
9506
9507 ins_cost(BRANCH_COST);
9508
9509 format %{ "b$cmp $op1, $op2, $lbl\t#@cmpI_loop" %}
9510
9511 ins_encode %{
9512 __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
9513 %}
9514
9515 ins_pipe(pipe_cmp_branch);
9516 ins_short_branch(1);
9517 %}
9518
// Compare unsigned int and branch near instructions
// The unsigned_branch_mask OR-ed into the cmpcode selects the unsigned
// branch encodings in cmp_branch.
instruct cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl)
%{
  // Same match rule as `far_cmpU_branch'.
  match(If cmp (CmpU op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpU_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Compare signed long and branch near instructions
instruct cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpL_branch'.
  match(If cmp (CmpL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Loop-back-edge variant of cmpL_branch (matches CountedLoopEnd).
instruct cmpL_loop(cmpOp cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpL_loop'.
  match(CountedLoopEnd cmp (CmpL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpL_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Compare unsigned long and branch near instructions
instruct cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl)
%{
  // Same match rule as `far_cmpUL_branch'.
  match(If cmp (CmpUL op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpUL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
9598
// Compare pointer and branch near instructions
// Pointers compare as unsigned values, hence the unsigned_branch_mask.
instruct cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
%{
  // Same match rule as `far_cmpP_branch'.
  match(If cmp (CmpP op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpP_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Compare narrow pointer and branch near instructions
instruct cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
%{
  // Same match rule as `far_cmpN_branch'.
  match(If cmp (CmpN op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, $op2, $lbl\t#@cmpN_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Compare float and branch near instructions
instruct cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
%{
  // Same match rule as `far_cmpF_branch'.
  match(If cmp (CmpF op1 op2));

  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST);
  format %{ "float_b$cmp $op1, $op2, $lbl \t#@cmpF_branch"%}

  ins_encode %{
    __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_class_compare);
  ins_short_branch(1);
%}

// Compare double and branch near instructions
// double_branch_mask selects the double-precision compare in
// float_cmp_branch.
instruct cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
%{
  // Same match rule as `far_cmpD_branch'.
  match(If cmp (CmpD op1 op2));
  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST);
  format %{ "double_b$cmp $op1, $op2, $lbl\t#@cmpD_branch"%}

  ins_encode %{
    __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
                        as_FloatRegister($op2$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_class_compare);
  ins_short_branch(1);
%}
9678
// Compare signed int with zero and branch near instructions
// Zero comparisons use the hard-wired zr register instead of materializing 0.
instruct cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpI_reg_imm0_branch'.
  match(If cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b$cmp $op1, zr, $lbl\t#@cmpI_reg_imm0_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}

// Loop-back-edge variant of cmpI_reg_imm0_branch (matches CountedLoopEnd).
instruct cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpI_reg_imm0_loop'.
  match(CountedLoopEnd cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpI_reg_imm0_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}

// Compare unsigned int with zero and branch near instructions
// Only eq/ne/le/gt are meaningful for an unsigned compare against zero
// (lt is always false, ge always true), hence the restricted operand.
instruct cmpUEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
%{
  // Same match rule as `far_cmpUEqNeLeGt_reg_imm0_branch'.
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpUEqNeLeGt_reg_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}

// Compare signed long with zero and branch near instructions
instruct cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  // Same match rule as `far_cmpL_reg_imm0_branch'.
  match(If cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "b$cmp $op1, zr, $lbl\t#@cmpL_reg_imm0_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
  %}

  ins_pipe(pipe_cmpz_branch);
  ins_short_branch(1);
%}
9756
9757 instruct cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
9758 %{
9759 // Same match rule as `far_cmpL_reg_imm0_loop'.
9760 match(CountedLoopEnd cmp (CmpL op1 zero));
9761
9762 effect(USE op1, USE lbl);
9763
9764 ins_cost(BRANCH_COST);
9765
9766 format %{ "b$cmp $op1, zr, $lbl\t#@cmpL_reg_imm0_loop" %}
9767
9768 ins_encode %{
9769 __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label));
9770 %}
9771
9772 ins_pipe(pipe_cmpz_branch);
9773 ins_short_branch(1);
9774 %}
9775
9776 // Compare unsigned long with zero and branch near instructions
9777 instruct cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
9778 %{
9779 // Same match rule as `far_cmpULEqNeLeGt_reg_imm0_branch'.
9780 match(If cmp (CmpUL op1 zero));
9781
9782 effect(USE op1, USE lbl);
9783
9784 ins_cost(BRANCH_COST);
9785
9786 format %{ "b$cmp $op1, zr, $lbl\t#@cmpULEqNeLeGt_reg_imm0_branch" %}
9787
9788 ins_encode %{
9789 __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
9790 %}
9791
9792 ins_pipe(pipe_cmpz_branch);
9793 ins_short_branch(1);
9794 %}
9795
9796 // Compare pointer with zero and branch near instructions
9797 instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
9798 // Same match rule as `far_cmpP_reg_imm0_branch'.
9799 match(If cmp (CmpP op1 zero));
9800 effect(USE lbl);
9801
9802 ins_cost(BRANCH_COST);
9803 format %{ "b$cmp $op1, zr, $lbl\t#@cmpP_imm0_branch" %}
9804
9805 ins_encode %{
9806 __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
9807 %}
9808
9809 ins_pipe(pipe_cmpz_branch);
9810 ins_short_branch(1);
9811 %}
9812
9813 // Compare narrow pointer with zero and branch near instructions
9814 instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
9815 // Same match rule as `far_cmpN_reg_imm0_branch'.
9816 match(If cmp (CmpN op1 zero));
9817 effect(USE lbl);
9818
9819 ins_cost(BRANCH_COST);
9820
9821 format %{ "b$cmp $op1, zr, $lbl\t#@cmpN_imm0_branch" %}
9822
9823 ins_encode %{
9824 __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
9825 %}
9826
9827 ins_pipe(pipe_cmpz_branch);
9828 ins_short_branch(1);
9829 %}
9830
9831 // Compare narrow pointer with pointer zero and branch near instructions
9832 instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
9833 // Same match rule as `far_cmpP_narrowOop_imm0_branch'.
9834 match(If cmp (CmpP (DecodeN op1) zero));
9835 effect(USE lbl);
9836
9837 ins_cost(BRANCH_COST);
9838 format %{ "b$cmp $op1, zr, $lbl\t#@cmpP_narrowOop_imm0_branch" %}
9839
9840 ins_encode %{
9841 __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label));
9842 %}
9843
9844 ins_pipe(pipe_cmpz_branch);
9845 ins_short_branch(1);
9846 %}
9847
// Patterns for far (20KiB) variants

// Branch far on the value of the flags register (eq/ne against zero).
instruct far_cmpFlag_branch(cmpOp cmp, rFlagsReg cr, label lbl) %{
  match(If cmp cr);
  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // Restored the conventional " %}" spacing to match the other branch formats.
  format %{ "far_b$cmp $cr, zr, $lbl\t#@far_cmpFlag_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($cr$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
9863
// Compare signed int and branch far instructions
instruct far_cmpI_branch(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  // The far_b$cmp pseudo-instruction is emitted as two instructions by the
  // macroassembler: b$not_cmp(op1, op2, done), j($lbl), bind(done)
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpI_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9881
// Loop back-edge form of the far int compare-and-branch.
instruct far_cmpI_loop(cmpOp cmp, iRegI op1, iRegI op2, label lbl) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpI_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}

// Unsigned int compare and branch far.
instruct far_cmpU_branch(cmpOpU cmp, iRegI op1, iRegI op2, label lbl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpU_branch" %}

  ins_encode %{
    // unsigned_branch_mask selects the unsigned flavour of cmp_branch.
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}

// Signed long compare and branch far.
instruct far_cmpL_branch(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9924
// Loop back-edge form of the far long compare-and-branch.
// NOTE: the #@ tag previously read "far_cmpL_loop", which did not match the
// instruct name; it is aligned with the name here so debug output is accurate.
instruct far_cmpLloop(cmpOp cmp, iRegL op1, iRegL op2, label lbl) %{
  match(CountedLoopEnd cmp (CmpL op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpLloop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9938
// Unsigned long compare and branch far.
instruct far_cmpUL_branch(cmpOpU cmp, iRegL op1, iRegL op2, label lbl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpUL_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}

// Pointer compare and branch far; pointers compare as unsigned values.
instruct far_cmpP_branch(cmpOpU cmp, iRegP op1, iRegP op2, label lbl)
%{
  match(If cmp (CmpP op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpP_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}

// Narrow-oop compare and branch far; narrow oops also compare as unsigned.
instruct far_cmpN_branch(cmpOpU cmp, iRegN op1, iRegN op2, label lbl)
%{
  match(If cmp (CmpN op1 op2));

  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, $op2, $lbl\t#@far_cmpN_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode | C2_MacroAssembler::unsigned_branch_mask, as_Register($op1$$reg),
                  as_Register($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmp_branch);
%}
9989
// Float compare and branch instructions
instruct far_cmpF_branch(cmpOp cmp, fRegF op1, fRegF op2, label lbl)
%{
  match(If cmp (CmpF op1 op2));

  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST * 2);
  // Restored the conventional " %}" spacing to match the other branch formats.
  format %{ "far_float_b$cmp $op1, $op2, $lbl\t#@far_cmpF_branch" %}

  ins_encode %{
    __ float_cmp_branch($cmp$$cmpcode, as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_class_compare);
%}
10007
// Double compare and branch instructions
instruct far_cmpD_branch(cmpOp cmp, fRegD op1, fRegD op2, label lbl)
%{
  match(If cmp (CmpD op1 op2));
  effect(USE lbl);

  ins_cost(XFER_COST + BRANCH_COST * 2);
  // Restored the conventional " %}" spacing to match the other branch formats.
  format %{ "far_double_b$cmp $op1, $op2, $lbl\t#@far_cmpD_branch" %}

  ins_encode %{
    __ float_cmp_branch($cmp$$cmpcode | C2_MacroAssembler::double_branch_mask, as_FloatRegister($op1$$reg),
                        as_FloatRegister($op2$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_class_compare);
%}
10024
// Far variants of the compare-against-zero branches. These share match
// rules with the ins_short_branch(1) instructs earlier in the file.
instruct far_cmpI_reg_imm0_branch(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpI_reg_imm0_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

instruct far_cmpI_reg_imm0_loop(cmpOp cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(CountedLoopEnd cmp (CmpI op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpI_reg_imm0_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

instruct far_cmpUEqNeLeGt_imm0_branch(cmpOpUEqNeLeGt cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpUEqNeLeGt_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Unsigned lt/ge compares against zero have no short instruct with the same
// match rule: the result is known (lt is always false, ge always true), so
// the encoding reduces to an unconditional jump or nothing.
instruct far_cmpULtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegI op1, immI0 zero, label lbl)
%{
  match(If cmp (CmpU op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "j $lbl if $cmp == ge\t#@far_cmpULtGe_reg_imm0_branch" %}

  ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));

  ins_pipe(pipe_cmpz_branch);
%}

instruct far_cmpL_reg_imm0_branch(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpL_reg_imm0_branch" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

instruct far_cmpL_reg_imm0_loop(cmpOp cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(CountedLoopEnd cmp (CmpL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpL_reg_imm0_loop" %}

  ins_encode %{
    __ cmp_branch($cmp$$cmpcode, as_Register($op1$$reg), zr, *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

instruct far_cmpULEqNeLeGt_reg_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpUL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpULEqNeLeGt_reg_imm0_branch" %}

  ins_encode %{
    __ enc_cmpUEqNeLeGt_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Unsigned lt/ge compares against zero have no short instruct with the same
// match rule (see far_cmpULtGe_reg_imm0_branch above).
instruct far_cmpULLtGe_reg_imm0_branch(cmpOpULtGe cmp, iRegL op1, immL0 zero, label lbl)
%{
  match(If cmp (CmpUL op1 zero));

  effect(USE op1, USE lbl);

  ins_cost(BRANCH_COST);

  format %{ "j $lbl if $cmp == ge\t#@far_cmpULLtGe_reg_imm0_branch" %}

  ins_encode(riscv_enc_far_cmpULtGe_imm0_branch(cmp, op1, lbl));

  ins_pipe(pipe_cmpz_branch);
%}

instruct far_cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 zero, label lbl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpP_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

instruct far_cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 zero, label lbl) %{
  match(If cmp (CmpN op1 zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);

  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpN_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}

// Narrow-oop-vs-pointer-zero: the DecodeN is folded, a narrow oop is zero
// iff its decoded form is.
instruct far_cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN op1, immP0 zero, label lbl) %{
  match(If cmp (CmpP (DecodeN op1) zero));
  effect(USE lbl);

  ins_cost(BRANCH_COST * 2);
  format %{ "far_b$cmp $op1, zr, $lbl\t#@far_cmpP_narrowOop_imm0_branch" %}

  ins_encode %{
    __ enc_cmpEqNe_imm0_branch($cmp$$cmpcode, as_Register($op1$$reg), *($lbl$$label), /* is_far */ true);
  %}

  ins_pipe(pipe_cmpz_branch);
%}
10201
10202 // ============================================================================
10203 // Conditional Move Instructions
10204
10205 // --------- CMoveI ---------
10206
10207 instruct cmovI_cmpI(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOp cop) %{
10208 match(Set dst (CMoveI (Binary cop (CmpI op1 op2)) (Binary dst src)));
10209 ins_cost(ALU_COST + BRANCH_COST);
10210
10211 format %{
10212 "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpI\n\t"
10213 %}
10214
10215 ins_encode %{
10216 __ enc_cmove($cop$$cmpcode,
10217 as_Register($op1$$reg), as_Register($op2$$reg),
10218 as_Register($dst$$reg), as_Register($src$$reg));
10219 %}
10220
10221 ins_pipe(pipe_class_compare);
10222 %}
10223
10224 instruct cmovI_cmpU(iRegINoSp dst, iRegI src, iRegI op1, iRegI op2, cmpOpU cop) %{
10225 match(Set dst (CMoveI (Binary cop (CmpU op1 op2)) (Binary dst src)));
10226 ins_cost(ALU_COST + BRANCH_COST);
10227
10228 format %{
10229 "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpU\n\t"
10230 %}
10231
10232 ins_encode %{
10233 __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10234 as_Register($op1$$reg), as_Register($op2$$reg),
10235 as_Register($dst$$reg), as_Register($src$$reg));
10236 %}
10237
10238 ins_pipe(pipe_class_compare);
10239 %}
10240
10241 instruct cmovI_cmpL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOp cop) %{
10242 match(Set dst (CMoveI (Binary cop (CmpL op1 op2)) (Binary dst src)));
10243 ins_cost(ALU_COST + BRANCH_COST);
10244
10245 format %{
10246 "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpL\n\t"
10247 %}
10248
10249 ins_encode %{
10250 __ enc_cmove($cop$$cmpcode,
10251 as_Register($op1$$reg), as_Register($op2$$reg),
10252 as_Register($dst$$reg), as_Register($src$$reg));
10253 %}
10254
10255 ins_pipe(pipe_class_compare);
10256 %}
10257
10258 instruct cmovI_cmpUL(iRegINoSp dst, iRegI src, iRegL op1, iRegL op2, cmpOpU cop) %{
10259 match(Set dst (CMoveI (Binary cop (CmpUL op1 op2)) (Binary dst src)));
10260 ins_cost(ALU_COST + BRANCH_COST);
10261
10262 format %{
10263 "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpUL\n\t"
10264 %}
10265
10266 ins_encode %{
10267 __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10268 as_Register($op1$$reg), as_Register($op2$$reg),
10269 as_Register($dst$$reg), as_Register($src$$reg));
10270 %}
10271
10272 ins_pipe(pipe_class_compare);
10273 %}
10274
10275 instruct cmovI_cmpF(iRegINoSp dst, iRegI src, fRegF op1, fRegF op2, cmpOp cop) %{
10276 match(Set dst (CMoveI (Binary cop (CmpF op1 op2)) (Binary dst src)));
10277 ins_cost(ALU_COST + BRANCH_COST);
10278
10279 format %{
10280 "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpF\n\t"
10281 %}
10282
10283 ins_encode %{
10284 __ enc_cmove_cmp_fp($cop$$cmpcode,
10285 as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
10286 as_Register($dst$$reg), as_Register($src$$reg), true /* is_single */);
10287 %}
10288
10289 ins_pipe(pipe_class_compare);
10290 %}
10291
10292 instruct cmovI_cmpD(iRegINoSp dst, iRegI src, fRegD op1, fRegD op2, cmpOp cop) %{
10293 match(Set dst (CMoveI (Binary cop (CmpD op1 op2)) (Binary dst src)));
10294 ins_cost(ALU_COST + BRANCH_COST);
10295
10296 format %{
10297 "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpD\n\t"
10298 %}
10299
10300 ins_encode %{
10301 __ enc_cmove_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
10302 as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
10303 as_Register($dst$$reg), as_Register($src$$reg), false /* is_single */);
10304 %}
10305
10306 ins_pipe(pipe_class_compare);
10307 %}
10308
10309 instruct cmovI_cmpN(iRegINoSp dst, iRegI src, iRegN op1, iRegN op2, cmpOpU cop) %{
10310 match(Set dst (CMoveI (Binary cop (CmpN op1 op2)) (Binary dst src)));
10311 ins_cost(ALU_COST + BRANCH_COST);
10312
10313 format %{
10314 "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpN\n\t"
10315 %}
10316
10317 ins_encode %{
10318 __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10319 as_Register($op1$$reg), as_Register($op2$$reg),
10320 as_Register($dst$$reg), as_Register($src$$reg));
10321 %}
10322
10323 ins_pipe(pipe_class_compare);
10324 %}
10325
10326 instruct cmovI_cmpP(iRegINoSp dst, iRegI src, iRegP op1, iRegP op2, cmpOpU cop) %{
10327 match(Set dst (CMoveI (Binary cop (CmpP op1 op2)) (Binary dst src)));
10328 ins_cost(ALU_COST + BRANCH_COST);
10329
10330 format %{
10331 "CMoveI $dst, ($op1 $cop $op2), $dst, $src\t#@cmovI_cmpP\n\t"
10332 %}
10333
10334 ins_encode %{
10335 __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
10336 as_Register($op1$$reg), as_Register($op2$$reg),
10337 as_Register($dst$$reg), as_Register($src$$reg));
10338 %}
10339
10340 ins_pipe(pipe_class_compare);
10341 %}
10342
// --------- CMoveL ---------
// Long-valued conditional moves; structure mirrors the CMoveI family above.

instruct cmovL_cmpL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpL\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct cmovL_cmpUL(iRegLNoSp dst, iRegL src, iRegL op1, iRegL op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpUL op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpUL\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct cmovL_cmpI(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpI op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpI\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct cmovL_cmpU(iRegLNoSp dst, iRegL src, iRegI op1, iRegI op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpU op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpU\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct cmovL_cmpF(iRegLNoSp dst, iRegL src, fRegF op1, fRegF op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpF op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpF\n\t"
  %}

  ins_encode %{
    __ enc_cmove_cmp_fp($cop$$cmpcode,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), true /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

instruct cmovL_cmpD(iRegLNoSp dst, iRegL src, fRegD op1, fRegD op2, cmpOp cop) %{
  match(Set dst (CMoveL (Binary cop (CmpD op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpD\n\t"
  %}

  ins_encode %{
    __ enc_cmove_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
                        as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
                        as_Register($dst$$reg), as_Register($src$$reg), false /* is_single */);
  %}

  ins_pipe(pipe_class_compare);
%}

instruct cmovL_cmpN(iRegLNoSp dst, iRegL src, iRegN op1, iRegN op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpN op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpN\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

instruct cmovL_cmpP(iRegLNoSp dst, iRegL src, iRegP op1, iRegP op2, cmpOpU cop) %{
  match(Set dst (CMoveL (Binary cop (CmpP op1 op2)) (Binary dst src)));
  ins_cost(ALU_COST + BRANCH_COST);

  format %{
    "CMoveL $dst, ($op1 $cop $op2), $dst, $src\t#@cmovL_cmpP\n\t"
  %}

  ins_encode %{
    __ enc_cmove($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
                 as_Register($op1$$reg), as_Register($op2$$reg),
                 as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}
10480
10481 // ============================================================================
10482 // Procedure Call/Return Instructions
10483
10484 // Call Java Static Instruction
10485 // Note: If this code changes, the corresponding ret_addr_offset() and
10486 // compute_padding() functions will have to be adjusted.
10487 instruct CallStaticJavaDirect(method meth)
10488 %{
10489 match(CallStaticJava);
10490
10491 effect(USE meth);
10492
10493 ins_cost(BRANCH_COST);
10494
10495 format %{ "CALL,static $meth\t#@CallStaticJavaDirect" %}
10496
10497 ins_encode(riscv_enc_java_static_call(meth),
10498 riscv_enc_call_epilog);
10499
10500 ins_pipe(pipe_class_call);
10501 ins_alignment(4);
10502 %}
10503
// TO HERE

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  // Extra ALU cost covers the inline-cache setup done by the dynamic-call
  // encoding relative to the static call.
  ins_cost(BRANCH_COST + ALU_COST * 5);

  format %{ "CALL,dynamic $meth\t#@CallDynamicJavaDirect" %}

  ins_encode(riscv_enc_java_dynamic_call(meth),
             riscv_enc_call_epilog);

  ins_pipe(pipe_class_call);
  ins_alignment(4);
%}
10525
// Call Runtime Instruction

instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime $meth\t#@CallRuntimeDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime leaf $meth\t#@CallLeafDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}
10559
// Call Runtime Instruction without safepoint and with vector arguments

instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  // Added the "#@" instruct tag to the format string for consistency with
  // every other call instruct in this section.
  format %{ "CALL, runtime leaf vector $meth\t#@CallLeafDirectVector" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}
10576
// Call Runtime Instruction

instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(BRANCH_COST);

  format %{ "CALL, runtime leaf nofp $meth\t#@CallLeafNoFPDirect" %}

  ins_encode(riscv_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}
10593
10594 // ============================================================================
10595 // Partial Subtype Check
10596 //
10597 // superklass array for an instance of the superklass. Set a hidden
10598 // internal cache on a hit (cache is checked with exposed code in
10599 // gen_subtype_check()). Return zero for a hit. The encoding
10600 // ALSO sets flags.
10601
10602 instruct partialSubtypeCheck(iRegP_R15 result, iRegP_R14 sub, iRegP_R10 super, iRegP_R12 tmp, rFlagsReg cr)
10603 %{
10604 predicate(!UseSecondarySupersTable);
10605 match(Set result (PartialSubtypeCheck sub super));
10606 effect(KILL tmp, KILL cr);
10607
10608 ins_cost(20 * DEFAULT_COST);
10609 format %{ "partialSubtypeCheck $result, $sub, $super\t#@partialSubtypeCheck" %}
10610
10611 ins_encode(riscv_enc_partial_subtype_check(sub, super, tmp, result));
10612
10613 opcode(0x1); // Force zero of result reg on hit
10614
10615 ins_pipe(pipe_class_memory);
10616 %}
10617
// Two versions of partialSubtypeCheck, both used when we need to
// search for a super class in the secondary supers array. The first
// is used when we don't know _a priori_ the class being searched
// for. The second, far more common, is used when we do know: this is
// used for instanceof, checkcast, and any case where C2 can determine
// it by constant propagation.

instruct partialSubtypeCheckVarSuper(iRegP_R14 sub, iRegP_R10 super, iRegP_R15 result,
                                     iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13,
                                     iRegP_R16 tmpR16, rFlagsReg cr)
%{
  predicate(UseSecondarySupersTable);
  match(Set result (PartialSubtypeCheck sub super));
  effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);

  ins_cost(10 * DEFAULT_COST); // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    // No L_success label: the caller inspects $result instead of branching.
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register, $result$$Register,
                                         $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
                                         $tmpR16$$Register, nullptr /*L_success*/);
  %}

  ins_pipe(pipe_class_memory);
%}
10644
// Constant-superclass form: the super klass is a compile-time constant, so
// its hash slot can be computed here and either inlined or dispatched to a
// per-slot stub.
instruct partialSubtypeCheckConstSuper(iRegP_R14 sub, iRegP_R10 super_reg, immP super_con, iRegP_R15 result,
                                       iRegP_R11 tmpR11, iRegP_R12 tmpR12, iRegP_R13 tmpR13, iRegP_R16 tmpR16, rFlagsReg cr)
%{
  predicate(UseSecondarySupersTable);
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  effect(TEMP tmpR11, TEMP tmpR12, TEMP tmpR13, TEMP tmpR16, KILL cr);

  ins_cost(5 * DEFAULT_COST); // needs to be less than competing nodes
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success = __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register, $result$$Register,
                                                       $tmpR11$$Register, $tmpR12$$Register, $tmpR13$$Register,
                                                       $tmpR16$$Register, super_klass_slot);
    } else {
      // Out-of-line path: call the shared per-slot stub instead of inlining.
      address call = __ reloc_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    // Either path can fail to emit if the code cache has no room; bail out
    // of the compilation rather than producing a broken nmethod.
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
10674
// Scalar (non-RVV) string comparison, UTF-16 vs UTF-16 encoding.
instruct string_compareU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                         iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareU" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
10692
// Scalar (non-RVV) string comparison, Latin-1 vs Latin-1 encoding.
instruct string_compareL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                         iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareL" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
10709
// Scalar String.compareTo intrinsic, UL mixed-encoding variant.
// NOTE(review): UL presumably means str1 is UTF-16 and str2 is Latin-1 --
// confirm against StrIntrinsicNode.
instruct string_compareUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3, rFlagsReg cr)
%{
  // Only when the vector (RVV) variant is not in use.
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  // All inputs and temps are clobbered by the macro-assembler routine.
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // Space added after '%{' for consistency with the other string_compare* formats.
  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareUL" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
10726
// Scalar String.compareTo intrinsic, LU mixed-encoding variant.
// NOTE(review): LU presumably means str1 is Latin-1 and str2 is UTF-16 --
// confirm against StrIntrinsicNode.
instruct string_compareLU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegP_R28 tmp1, iRegL_R29 tmp2, iRegL_R30 tmp3,
                          rFlagsReg cr)
%{
  // Only when the vector (RVV) variant is not in use.
  predicate(!UseRVV && ((StrCompNode *)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  // All inputs and temps are clobbered by the macro-assembler routine.
  effect(KILL tmp1, KILL tmp2, KILL tmp3, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1, $cnt1, $str2, $cnt2 -> $result\t#@string_compareLU" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
10744
// String.indexOf intrinsic with variable pattern length, UU encoding
// (both strings UTF-16).
instruct string_indexofUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  // TEMP_DEF: result is also used as a scratch register, so it may not share
  // a register with any input.
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %}
  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
10765
// String.indexOf intrinsic with variable pattern length, LL encoding
// (both strings Latin-1).
instruct string_indexofLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  // TEMP_DEF: result is also used as a scratch register, so it may not share
  // a register with any input.
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %}
  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
10786
// String.indexOf intrinsic with variable pattern length, UL mixed-encoding
// variant. NOTE(review): UL presumably means str1 is UTF-16 and str2 is
// Latin-1 -- confirm against StrIntrinsicNode.
instruct string_indexofUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2, iRegI_R14 cnt2,
                          iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  // TEMP_DEF: result is also used as a scratch register, so it may not share
  // a register with any input.
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %}
  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
10807
// String.indexOf intrinsic for a small constant pattern length (<= 4 chars,
// see immI_le_4), UU encoding; uses the linear-scan routine with cnt2 folded
// into the immediate icnt2 (zr is passed for the cnt2 register).
instruct string_indexof_conUU(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
10829
// String.indexOf intrinsic for a small constant pattern length (<= 4,
// see immI_le_4), LL encoding; linear-scan routine with the length as an
// immediate (zr passed in place of the cnt2 register).
instruct string_indexof_conLL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_le_4 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %}
  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
10850
// String.indexOf intrinsic for a constant pattern length of exactly 1
// (see immI_1), UL mixed-encoding variant; linear-scan routine with the
// length as an immediate (zr passed in place of the cnt2 register).
instruct string_indexof_conUL(iRegP_R11 str1, iRegI_R12 cnt1, iRegP_R13 str2,
                              immI_1 int_cnt2, iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %}
  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof_linearscan($str1$$Register, $str2$$Register,
                                 $cnt1$$Register, zr,
                                 $tmp1$$Register, $tmp2$$Register,
                                 $tmp3$$Register, $tmp4$$Register,
                                 icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
10871
// StringUTF16.indexOf(char) intrinsic (scalar; the RVV variant is excluded
// by the predicate).
instruct stringU_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
                              iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
  ins_encode %{
    // Last argument is the 'isL' flag: false selects the UTF-16 path, matching
    // the 'true /* isL */' in the stringL_indexof_char sibling below.
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
10889
10890
// StringLatin1.indexOf(char) intrinsic (scalar; the RVV variant is excluded
// by the predicate).
instruct stringL_indexof_char(iRegP_R11 str1, iRegI_R12 cnt1, iRegI_R13 ch,
                              iRegI_R10 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate(!UseRVV && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1, $cnt1, $ch -> $result" %}
  ins_encode %{
    // 'true' selects the Latin-1 (single byte per char) scan path.
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register, $tmp4$$Register, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
10908
10909 // clearing of an array
// Zero 'cnt' words starting at 'base' (array clearing), variable count.
instruct clearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, iRegP_R30 tmp1,
                            iRegP_R31 tmp2, rFlagsReg cr, Universe dummy)
%{
  // temp registers must match the one used in StubGenerator::generate_zero_blocks()
  predicate(UseBlockZeroing || !UseRVV);
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, TEMP tmp1, TEMP tmp2, KILL cr);

  ins_cost(4 * DEFAULT_COST);
  format %{ "ClearArray $cnt, $base\t#@clearArray_reg_reg" %}

  ins_encode %{
    // zero_words may need to emit a stub call; a nullptr return means it
    // could not, so bail out of the compile.
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
10931
// Zero a compile-time-constant number of words starting at 'base'.
// Only for small counts: the constant (in words) must be below
// BlockZeroingLowLimit scaled from bytes to words.
instruct clearArray_imm_reg(immL cnt, iRegP_R28 base, Universe dummy, rFlagsReg cr)
%{
  predicate(!UseRVV && (uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base, KILL cr);

  ins_cost(4 * DEFAULT_COST);
  format %{ "ClearArray $cnt, $base\t#@clearArray_imm_reg" %}

  ins_encode %{
    __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
  %}

  ins_pipe(pipe_class_memory);
%}
10948
// Scalar String.equals intrinsic, LL encoding (byte-wise comparison).
instruct string_equalsL(iRegP_R11 str1, iRegP_R13 str2, iRegI_R14 cnt,
                        iRegI_R10 result, rFlagsReg cr)
%{
  // Only when the vector (RVV) variant is not in use.
  predicate(!UseRVV && ((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1, $str2, $cnt -> $result\t#@string_equalsL" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
10964
// Arrays.equals intrinsic for byte arrays (LL encoding).
instruct array_equalsB(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
                       iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
%{
  predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsB // KILL all" %}
  ins_encode %{
    // Trailing 1: presumably the element size in bytes (byte elements) --
    // compare the '2' used by array_equalsC; confirm against arrays_equals.
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, 1);
  %}
  ins_pipe(pipe_class_memory);
%}
10980
// Arrays.equals intrinsic for char arrays (UU encoding).
instruct array_equalsC(iRegP_R11 ary1, iRegP_R12 ary2, iRegI_R10 result,
                       iRegP_R13 tmp1, iRegP_R14 tmp2, iRegP_R15 tmp3)
%{
  predicate(!UseRVV && ((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "Array Equals $ary1, $ary2 -> $result\t#@array_equalsC // KILL all" %}
  ins_encode %{
    // Trailing 2: presumably the element size in bytes (16-bit chars) --
    // compare the '1' used by array_equalsB; confirm against arrays_equals.
    __ arrays_equals($ary1$$Register, $ary2$$Register,
                     $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                     $result$$Register, 2);
  %}
  ins_pipe(pipe_class_memory);
%}
10996
10997 // fast ArraysSupport.vectorizedHashCode
// fast ArraysSupport.vectorizedHashCode
// 'basic_type' is a compile-time constant selecting the element type
// (cast to BasicType below); 'result' carries the initial hash in and the
// final hash out.
instruct arrays_hashcode(iRegP_R11 ary, iRegI_R12 cnt, iRegI_R10 result, immI basic_type,
                         iRegLNoSp tmp1, iRegLNoSp tmp2,
                         iRegLNoSp tmp3, iRegLNoSp tmp4,
                         iRegLNoSp tmp5, iRegLNoSp tmp6, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result // KILL all" %}
  ins_encode %{
    __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                       $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                       $tmp4$$Register, $tmp5$$Register, $tmp6$$Register,
                       (BasicType)$basic_type$$constant);
  %}
  ins_pipe(pipe_class_memory);
%}
11016
11017 // ============================================================================
11018 // Safepoint Instructions
11019
// Safepoint poll: load from the polling page; the value is discarded
// (destination is the zero register), only the access matters.
instruct safePoint(iRegP poll)
%{
  match(SafePoint poll);

  ins_cost(2 * LOAD_COST);
  format %{
    "lwu zr, [$poll]\t# Safepoint: poll for GC, #@safePoint"
  %}
  ins_encode %{
    // poll_type relocation lets the VM identify this as a safepoint poll.
    __ read_polling_page(as_Register($poll$$reg), 0, relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
11033
11034 // ============================================================================
11035 // This name is KNOWN by the ADLC and cannot be changed.
11036 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
11037 // for this guy.
// Load the current JavaThread pointer. Emits no code (size(0)): the operand
// class javaThread_RegP pins $dst to the register that already holds the
// thread pointer.
instruct tlsLoadP(javaThread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty, #@tlsLoadP" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
11052
11053 // inlined locking and unlocking
11054 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// Inlined monitor enter for the legacy locking modes
// (predicate excludes LM_LIGHTWEIGHT, which is handled by
// cmpFastLockLightweight below). Sets $cr for the BoolNode consumer.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box,
                     iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);

  ins_cost(10 * DEFAULT_COST);
  format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLock" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register,
                 $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
  %}

  ins_pipe(pipe_serial);
%}
11072
11073 // using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// using t1 as the 'flag' register to bridge the BoolNode producers and consumers
// Inlined monitor exit for the legacy locking modes (not LM_LIGHTWEIGHT).
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp1, TEMP tmp2);

  ins_cost(10 * DEFAULT_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp1, $tmp2, #@cmpFastUnlock" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
11089
// Inlined monitor enter for the lightweight locking mode (LM_LIGHTWEIGHT).
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box,
                                iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4);

  ins_cost(10 * DEFAULT_COST);
  format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3,$tmp4 #@cmpFastLockLightweight" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register,
                             $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
  %}

  ins_pipe(pipe_serial);
%}
11107
// Inlined monitor exit for the lightweight locking mode (LM_LIGHTWEIGHT).
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box,
                                  iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);

  ins_cost(10 * DEFAULT_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register,
                               $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
11125
11126 // Tail Call; Jump from runtime stub to Java code.
11127 // Also known as an 'interprocedural jump'.
11128 // Target of jump will eventually return to caller.
11129 // TailJump below removes the return address.
11130 // Don't use fp for 'jump_target' because a MachEpilogNode has already been
11131 // emitted just above the TailCall which has reset fp to the caller state.
// Indirect tail call: jump to $jump_target; $method_oop carries the method
// oop for the callee (see the comment block above for why fp is excluded).
instruct TailCalljmpInd(iRegPNoSpNoFp jump_target, inline_cache_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(BRANCH_COST);

  format %{ "jalr $jump_target\t# $method_oop holds method oop, #@TailCalljmpInd." %}

  ins_encode(riscv_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}
11144
// Indirect tail jump used for exception dispatch: jump to $jump_target with
// the exception oop pinned in R10 ($ex_oop).
instruct TailjmpInd(iRegPNoSpNoFp jump_target, iRegP_R10 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(ALU_COST + BRANCH_COST);

  format %{ "jalr $jump_target\t# $ex_oop holds exception oop, #@TailjmpInd." %}

  ins_encode(riscv_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
11157
11158 // Forward exception.
// Forward exception: jump to the shared forward_exception stub.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);

  ins_cost(BRANCH_COST);

  format %{ "j forward_exception_stub\t#@ForwardException" %}

  ins_encode %{
    // far_jump: the stub may be out of direct-branch range.
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  %}

  ins_pipe(pipe_class_call);
%}
11173
11174 // Create exception oop: created by stack-crawling runtime code.
11175 // Created exception is now available to this handler, and is setup
11176 // just prior to jumping to this handler. No code emitted.
// CreateEx node: the exception oop is already in R10 when control arrives
// here (set up by the stack-crawling runtime code described above), so no
// instructions are emitted (size(0)).
instruct CreateException(iRegP_R10 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  ins_cost(0);
  format %{ " -- \t// exception oop; no code emitted, #@CreateException" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
11190
11191 // Rethrow exception: The exception oop will come in the first
11192 // argument position. Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to the rethrow stub; the exception oop is in the
// first argument register (see the comment block above).
instruct RethrowException()
%{
  match(Rethrow);

  ins_cost(BRANCH_COST);

  format %{ "j rethrow_stub\t#@RethrowException" %}

  ins_encode(riscv_enc_rethrow());

  ins_pipe(pipe_class_call);
%}
11205
11206 // Return Instruction
11207 // epilog node loads ret address into ra as part of frame pop
// Method return; the epilog has already restored ra (see comment above).
instruct Ret()
%{
  match(Return);

  ins_cost(BRANCH_COST);
  format %{ "ret\t// return register, #@Ret" %}

  ins_encode(riscv_enc_ret());

  ins_pipe(pipe_branch);
%}
11219
11220 // Die now.
// Halt node: stop the VM with the recorded halt reason. Code is only
// emitted when the block is reachable.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(BRANCH_COST);

  format %{ "#@ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}

  ins_pipe(pipe_class_default);
%}
11237
11238
11239 //----------PEEPHOLE RULES-----------------------------------------------------
11240 // These must follow all instruction definitions as they use the names
11241 // defined in the instructions definitions.
11242 //
11243 // peepmatch ( root_instr_name [preceding_instruction]* );
11244 //
11245 // peepconstraint %{
11246 // (instruction_number.operand_name relational_op instruction_number.operand_name
11247 // [, ...] );
11248 // // instruction numbers are zero-based using left to right order in peepmatch
11249 //
11250 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
11251 // // provide an instruction_number.operand_name for each operand that appears
11252 // // in the replacement instruction's match rule
11253 //
11254 // ---------VM FLAGS---------------------------------------------------------
11255 //
11256 // All peephole optimizations can be turned off using -XX:-OptoPeephole
11257 //
11258 // Each peephole rule is given an identifying number starting with zero and
11259 // increasing by one in the order seen by the parser. An individual peephole
11260 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
11261 // on the command-line.
11262 //
11263 // ---------CURRENT LIMITATIONS----------------------------------------------
11264 //
11265 // Only match adjacent instructions in same basic block
11266 // Only equality constraints
11267 // Only constraints between operands, not (0.dest_reg == RAX_enc)
11268 // Only one replacement instruction
11269 //
11270 //----------SMARTSPILL RULES---------------------------------------------------
11271 // These must follow all instruction definitions as they use the names
11272 // defined in the instructions definitions.
11273
11274 // Local Variables:
11275 // mode: c++
11276 // End: