//
// Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

source_hpp %{

#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"

%}

source %{

#include "gc/g1/g1BarrierSetAssembler_aarch64.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"

// Emit the G1 pre-barrier for a C2 node, unless the node's barrier data
// indicates that no pre-barrier is needed. The slow path is generated as an
// out-of-line stub; registers in 'preserve' are recorded for the stub to save
// across the runtime call, registers in 'no_preserve' are explicitly excluded.
static void write_barrier_pre(MacroAssembler* masm,
                              const MachNode* node,
                              Register obj,
                              Register pre_val,
                              Register tmp1,
                              Register tmp2,
                              RegSet preserve = RegSet(),
                              RegSet no_preserve = RegSet()) {
  if (!G1PreBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  G1PreBarrierStubC2* const stub = G1PreBarrierStubC2::create(node);
  for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
    stub->preserve(*reg);
  }
  for (RegSetIterator<Register> reg = no_preserve.begin(); *reg != noreg; ++reg) {
    stub->dont_preserve(*reg);
  }
  g1_asm->g1_write_barrier_pre_c2(masm, obj, pre_val, rthread, tmp1, tmp2, stub);
}

// Emit the G1 post-barrier for a C2 node, unless the node's barrier data
// indicates that no post-barrier is needed.
static void write_barrier_post(MacroAssembler* masm,
                               const MachNode* node,
                               Register store_addr,
                               Register new_val,
                               Register tmp1,
                               Register tmp2) {
  if (!G1PostBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  G1PostBarrierStubC2* const stub = G1PostBarrierStubC2::create(node);
  g1_asm->g1_write_barrier_post_c2(masm, store_addr, new_val, rthread, tmp1, tmp2, stub);
}

%}

// BEGIN This section of the file is automatically generated. Do not edit --------------

// This section is generated from g1_aarch64.m4


// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StoreP(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "str $src, $mem\t# ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ str($src$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr $src, $mem\t# ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ stlr($src$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StoreN(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "strw $src, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ strw($src$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPost) != 0) {
      if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
        __ decode_heap_oop($tmp1$$Register, $src$$Register);
      } else {
        __ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
      }
    }
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StoreNVolatile(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw $src, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ stlrw($src$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPost) != 0) {
      if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
        __ decode_heap_oop($tmp1$$Register, $src$$Register);
      } else {
        __ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
      }
    }
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1EncodePAndStoreN(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "strw $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ strw($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1EncodePAndStoreNVolatile(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "stlrw $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ stlrw($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndSwapP(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $mem, $oldval, $newval\t# (ptr)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $mem, $oldval, $newval\t# (ptr)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndSwapN(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $mem, $oldval, $newval\t# (narrow oop)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1GetAndSetP(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetP mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $preval$$Register /* pre_val (as a temporary register) */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchg($preval$$Register, $newval$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1GetAndSetPAcq(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetP mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $preval$$Register /* pre_val (as a temporary register) */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgal($preval$$Register, $newval$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1GetAndSetN(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetN mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgw($preval$$Register, $newval$$Register, $mem$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1GetAndSetNAcq(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetN mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgalw($preval$$Register, $newval$$Register, $mem$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1LoadP(iRegPNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  // This instruction does not need an acquiring counterpart because it is only
  // used for reference loading (Reference::get()). The same holds for g1LoadN.
  predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadP mem));
  effect(TEMP dst, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(4 * INSN_COST);
  format %{ "ldr $dst, $mem\t# ptr" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $dst$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(iload_reg_mem);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1LoadN(iRegNNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadN mem));
  effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# compressed ptr" %}
  ins_encode %{
    __ ldrw($dst$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPre) != 0) {
      __ decode_heap_oop($tmp1$$Register, $dst$$Register);
      write_barrier_pre(masm, this,
                        noreg /* obj */,
                        $tmp1$$Register /* pre_val */,
                        $tmp2$$Register /* tmp1 */,
                        $tmp3$$Register /* tmp2 */);
    }
  %}
  ins_pipe(iload_reg_mem);
%}

// END This section of the file is automatically generated. Do not edit --------------