1 //
2 // Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
23
24 source_hpp %{
25
26 #include "gc/g1/c2/g1BarrierSetC2.hpp"
27 #include "gc/shared/gc_globals.hpp"
28
29 %}
30
31 source %{
32
33 #include "gc/g1/g1BarrierSetAssembler_aarch64.hpp"
34 #include "gc/g1/g1BarrierSetRuntime.hpp"
35
// Emit the G1 pre-write barrier for a C2 mach node, if the node's barrier
// data requests one. The slow path is modelled as a G1PreBarrierStubC2;
// 'preserve' / 'no_preserve' tune which registers are saved around the
// out-of-line runtime call. Callers may pass obj == noreg when 'pre_val'
// already holds the previous value (see the CompareAndExchange/LoadP users
// below), so no load from 'obj' is required.
static void write_barrier_pre(MacroAssembler* masm,
                              const MachNode* node,
                              Register obj,
                              Register pre_val,
                              Register tmp1,
                              Register tmp2,
                              RegSet preserve = RegSet(),
                              RegSet no_preserve = RegSet()) {
  if (!G1PreBarrierStubC2::needs_barrier(node)) {
    return; // Node's barrier data does not include a pre barrier.
  }
  // Do not let the barrier instructions inflate C2's estimate of the
  // node's inline code size.
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  G1PreBarrierStubC2* const stub = G1PreBarrierStubC2::create(node);
  // Registers that must survive the stub's runtime call ...
  for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
    stub->preserve(*reg);
  }
  // ... and registers explicitly exempted from saving (e.g. a result
  // register that is overwritten after the barrier anyway).
  for (RegSetIterator<Register> reg = no_preserve.begin(); *reg != noreg; ++reg) {
    stub->dont_preserve(*reg);
  }
  g1_asm->g1_write_barrier_pre_c2(masm, obj, pre_val, rthread, tmp1, tmp2, stub);
}
58
// Emit the G1 post-write barrier for a C2 mach node, if the node's barrier
// data requests one. 'store_addr' is the address that was written and
// 'new_val' the (uncompressed) value stored there; 'new_val_may_be_null'
// is derived from the node's barrier data and lets the assembler skip the
// null filter when the value is known non-null.
static void write_barrier_post(MacroAssembler* masm,
                               const MachNode* node,
                               Register store_addr,
                               Register new_val,
                               Register tmp1,
                               Register tmp2) {
  if (!G1BarrierStubC2::needs_post_barrier(node)) {
    return; // Node's barrier data does not include a post barrier.
  }
  // Keep barrier instructions out of C2's inline code size estimate.
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  bool new_val_may_be_null = G1BarrierStubC2::post_new_val_may_be_null(node);
  g1_asm->g1_write_barrier_post_c2(masm, store_addr, new_val, rthread, tmp1, tmp2, new_val_may_be_null);
}
73
74 %}
75
76 // TODO 8350865 (same applies to g1StoreLSpecialTwoOops)
// - Do not set/overwrite barrier data here, also handle G1C2BarrierPostNotNull
78 // - Move this into the .m4?
// Store of a long whose low 32 bits hold a narrow oop at field offset 0:
// pre barrier on the field at $mem, one 64-bit store, then post barrier
// for the decoded narrow oop extracted from the low half of $src.
instruct g1StoreLSpecialOneOopOff0(indirect mem, iRegLNoSp src, immI0 off, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC);
  match(Set mem (StoreLSpecial mem (Binary src off)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "str $src, $mem\t# g1StoreLSpecialOneOopOff0" %}
  ins_encode %{
    // See TODO 8350865 above: barrier data is forced here rather than set
    // during barrier analysis.
    ((MachNode*)this)->set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);

    __ str($src$$Register, $mem$$Register);

    // Extract the narrow oop field value
    // (ubfm with immr=0, imms=31 zero-extends the low 32 bits of $src).
    __ ubfm($tmp1$$Register, $src$$Register, 0, 31);
    __ decode_heap_oop($tmp1$$Register, $tmp1$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}
108
// Store of a long whose high 32 bits hold a narrow oop at field offset 4:
// the oop field lives at $mem + 4, so both barriers operate on that
// adjusted address ($tmp4) while the 64-bit store itself targets $mem.
instruct g1StoreLSpecialOneOopOff4(indirect mem, iRegLNoSp src, immI_4 off, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4, rFlagsReg cr)
%{
  predicate(UseG1GC);
  match(Set mem (StoreLSpecial mem (Binary src off)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  ins_cost(INSN_COST);
  format %{ "str $src, $mem\t# g1StoreLSpecialOneOopOff4" %}
  ins_encode %{
    // See TODO 8350865 above: barrier data is forced here rather than set
    // during barrier analysis.
    ((MachNode*)this)->set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);

    // Adjust address to point to narrow oop
    __ add($tmp4$$Register, $mem$$Register, 4);
    write_barrier_pre(masm, this,
                      $tmp4$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register, $tmp4$$Register) /* preserve */);

    __ str($src$$Register, $mem$$Register);

    // Shift long value to extract the narrow oop field value
    __ lsr($tmp1$$Register, $src$$Register, 32);
    __ decode_heap_oop($tmp1$$Register, $tmp1$$Register);
    write_barrier_post(masm, this,
                       $tmp4$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}
141
// Store of a long containing two narrow oops (low half at $mem, high half
// at $mem + 4): pre barriers for both field addresses, one 64-bit store,
// then post barriers for each decoded narrow oop. $tmp1 is reused as the
// pre_val scratch register and for both decoded new values.
instruct g1StoreLSpecialTwoOops(indirect mem, iRegLNoSp src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4, rFlagsReg cr)
%{
  predicate(UseG1GC);
  match(Set mem (StoreLSpecial mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  ins_cost(INSN_COST);
  format %{ "str $src, $mem\t# g1StoreLSpecialTwoOops" %}
  ins_encode %{
    // See TODO 8350865 above: barrier data is forced here rather than set
    // during barrier analysis.
    ((MachNode*)this)->set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);

    // Pre barrier for the first narrow oop, at $mem.
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    // Adjust address to point to the second narrow oop in the long value
    __ add($tmp4$$Register, $mem$$Register, 4);
    write_barrier_pre(masm, this,
                      $tmp4$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register, $tmp4$$Register) /* preserve */);

    __ str($src$$Register, $mem$$Register);

    // Zero-extend first narrow oop to long
    __ ubfm($tmp1$$Register, $src$$Register, 0, 31);
    __ decode_heap_oop($tmp1$$Register, $tmp1$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);

    // Shift long value to extract the second narrow oop field value
    __ lsr($tmp1$$Register, $src$$Register, 32);
    __ decode_heap_oop($tmp1$$Register, $tmp1$$Register);
    write_barrier_post(masm, this,
                       $tmp4$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}
189
190
191 // BEGIN This section of the file is automatically generated. Do not edit --------------
192
193 // This section is generated from g1_aarch64.m4
194
195
196 // This pattern is generated automatically from g1_aarch64.m4.
197 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Non-releasing oop store with G1 barriers: pre barrier on the old value
// at $mem, plain str, then post barrier for the stored $src.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1StoreP(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "str $src, $mem\t# ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ str($src$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}
221
222 // This pattern is generated automatically from g1_aarch64.m4.
223 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Releasing (volatile) oop store with G1 barriers: same shape as g1StoreP
// but uses stlr for release semantics.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1StorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr $src, $mem\t# ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ stlr($src$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}
247
248 // This pattern is generated automatically from g1_aarch64.m4.
249 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Non-releasing narrow-oop store with G1 barriers. The stored compressed
// value is decoded into $tmp1 for the post barrier.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1StoreN(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "strw $src, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ strw($src$$Register, $mem$$Register);
    // Decode only if a post barrier will consume the value; elide the
    // null check when barrier data guarantees a non-null new value.
    if ((barrier_data() & G1C2BarrierPost) != 0) {
      if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
        __ decode_heap_oop($tmp1$$Register, $src$$Register);
      } else {
        __ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
      }
    }
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}
280
281 // This pattern is generated automatically from g1_aarch64.m4.
282 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Releasing (volatile) narrow-oop store with G1 barriers: same shape as
// g1StoreN but uses stlrw for release semantics.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1StoreNVolatile(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw $src, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ stlrw($src$$Register, $mem$$Register);
    // Decode only if a post barrier will consume the value; elide the
    // null check when barrier data guarantees a non-null new value.
    if ((barrier_data() & G1C2BarrierPost) != 0) {
      if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
        __ decode_heap_oop($tmp1$$Register, $src$$Register);
      } else {
        __ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
      }
    }
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}
313
314 // This pattern is generated automatically from g1_aarch64.m4.
315 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fused EncodeP + narrow-oop store with G1 barriers: encodes $src into
// $tmp1, stores the compressed value, and passes the original full oop
// $src straight to the post barrier (no decode needed).
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1EncodePAndStoreN(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "strw $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    // Elide the null check in the encoding when barrier data guarantees
    // a non-null new value.
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ strw($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}
345
346 // This pattern is generated automatically from g1_aarch64.m4.
347 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Releasing variant of g1EncodePAndStoreN: encodes $src into $tmp1 and
// stores it with stlrw for release semantics.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1EncodePAndStoreNVolatile(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "stlrw $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    // Elide the null check in the encoding when barrier data guarantees
    // a non-null new value.
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ stlrw($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}
377
378 // This pattern is generated automatically from g1_aarch64.m4.
379 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// CompareAndExchange of a full oop with G1 barriers (non-acquiring). The
// pre barrier uses $oldval directly (obj == noreg), and $res is excluded
// from stub register saving since it is overwritten by cmpxchg anyway.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1CompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
410
411 // This pattern is generated automatically from g1_aarch64.m4.
412 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring variant of g1CompareAndExchangeP: identical barrier shape,
// cmpxchg performed with acquire semantics.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1CompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
443
444 // This pattern is generated automatically from g1_aarch64.m4.
445 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// CompareAndExchange of a narrow oop with G1 barriers (non-acquiring).
// Unlike the P variant, the pre barrier must load from $mem (a narrow
// $oldval cannot be passed as pre_val directly); $newval is decoded into
// $tmp1 for the post barrier.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1CompareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
474
475 // This pattern is generated automatically from g1_aarch64.m4.
476 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring variant of g1CompareAndExchangeN: identical barrier shape,
// cmpxchg performed with acquire semantics.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1CompareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
505
506 // This pattern is generated automatically from g1_aarch64.m4.
507 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// CompareAndSwap (boolean result) of a full oop with G1 barriers
// (non-acquiring). Also matches the weak form; cmpxchg discards the old
// value (noreg) and $res is set from the EQ flag.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1CompareAndSwapP(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $mem, $oldval, $newval\t# (ptr)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // $oldval serves as pre_val; see the comment in g1CompareAndExchangeP.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
538
539 // This pattern is generated automatically from g1_aarch64.m4.
540 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring variant of g1CompareAndSwapP: identical barrier shape,
// cmpxchg performed with acquire semantics.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1CompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $mem, $oldval, $newval\t# (ptr)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // $oldval serves as pre_val; see the comment in g1CompareAndExchangeP.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
571
572 // This pattern is generated automatically from g1_aarch64.m4.
573 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// CompareAndSwap (boolean result) of a narrow oop with G1 barriers
// (non-acquiring). Pre barrier loads from $mem; $newval is decoded into
// $tmp1 for the post barrier.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1CompareAndSwapN(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $mem, $oldval, $newval\t# (narrow oop)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
605
606 // This pattern is generated automatically from g1_aarch64.m4.
607 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring variant of g1CompareAndSwapN: identical barrier shape,
// cmpxchg performed with acquire semantics.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1CompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}
639
640 // This pattern is generated automatically from g1_aarch64.m4.
641 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// GetAndSet (atomic exchange) of a full oop with G1 barriers
// (non-acquiring). $preval doubles as the pre barrier's scratch register
// before receiving the exchanged old value.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1GetAndSetP(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetP mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $preval$$Register /* pre_val (as a temporary register) */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchg($preval$$Register, $newval$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}
666
667 // This pattern is generated automatically from g1_aarch64.m4.
668 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring variant of g1GetAndSetP: identical barrier shape, exchange
// performed with atomic_xchgal (acquire/release).
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1GetAndSetPAcq(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetP mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $preval$$Register /* pre_val (as a temporary register) */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgal($preval$$Register, $newval$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}
693
694 // This pattern is generated automatically from g1_aarch64.m4.
695 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// GetAndSet (atomic exchange) of a narrow oop with G1 barriers
// (non-acquiring). The pre barrier loads from $mem into $tmp1; after the
// exchange, $newval is decoded into $tmp1 for the post barrier.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1GetAndSetN(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetN mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgw($preval$$Register, $newval$$Register, $mem$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}
721
722 // This pattern is generated automatically from g1_aarch64.m4.
723 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring variant of g1GetAndSetN: identical barrier shape, exchange
// performed with atomic_xchgalw (acquire/release).
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1GetAndSetNAcq(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetN mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgalw($preval$$Register, $newval$$Register, $mem$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}
749
750 // This pattern is generated automatically from g1_aarch64.m4.
751 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Oop load with a G1 pre barrier on the loaded value (obj == noreg, the
// value is already in $dst).
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1LoadP(iRegPNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  // This instruction does not need an acquiring counterpart because it is only
  // used for reference loading (Reference::get()). The same holds for g1LoadN.
  predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadP mem));
  effect(TEMP dst, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(4 * INSN_COST);
  format %{ "ldr $dst, $mem\t# ptr" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $dst$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(iload_reg_mem);
%}
771
772 // This pattern is generated automatically from g1_aarch64.m4.
773 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Narrow-oop load with a G1 pre barrier on the loaded value: the
// compressed value in $dst is decoded into $tmp1 and passed as pre_val.
// Decode and barrier are skipped entirely when the node's barrier data
// requests no pre barrier.
// NOTE(review): generated from g1_aarch64.m4 — apply changes there.
instruct g1LoadN(iRegNNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadN mem));
  effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# compressed ptr" %}
  ins_encode %{
    __ ldrw($dst$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPre) != 0) {
      __ decode_heap_oop($tmp1$$Register, $dst$$Register);
      write_barrier_pre(masm, this,
                        noreg /* obj */,
                        $tmp1$$Register /* pre_val */,
                        $tmp2$$Register /* tmp1 */,
                        $tmp3$$Register /* tmp2 */);
    }
  %}
  ins_pipe(iload_reg_mem);
%}
794
795 // END This section of the file is automatically generated. Do not edit --------------