//
// Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//

source_hpp %{

#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"

%}

source %{

#include "gc/g1/g1BarrierSetAssembler_aarch64.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"

// Helper that emits the G1 pre-barrier (SATB) for 'node' unless the node's
// barrier data indicates that no pre-barrier is needed. Registers in
// 'preserve' are marked to be saved by the out-of-line stub; registers in
// 'no_preserve' are explicitly excluded from saving.
static void write_barrier_pre(MacroAssembler* masm,
                              const MachNode* node,
                              Register obj,
                              Register pre_val,
                              Register tmp1,
                              Register tmp2,
                              RegSet preserve = RegSet(),
                              RegSet no_preserve = RegSet()) {
  if (!G1PreBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  G1PreBarrierStubC2* const stub = G1PreBarrierStubC2::create(node);
  for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
    stub->preserve(*reg);
  }
  for (RegSetIterator<Register> reg = no_preserve.begin(); *reg != noreg; ++reg) {
    stub->dont_preserve(*reg);
  }
  g1_asm->g1_write_barrier_pre_c2(masm, obj, pre_val, rthread, tmp1, tmp2, stub);
}

// Helper that emits the G1 post-barrier (card marking) for 'node' unless the
// node's barrier data indicates that no post-barrier is needed.
static void write_barrier_post(MacroAssembler* masm,
                               const MachNode* node,
                               Register store_addr,
                               Register new_val,
                               Register tmp1,
                               Register tmp2,
                               RegSet preserve = RegSet()) {
  if (!G1PostBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  G1BarrierSetAssembler* g1_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
  G1PostBarrierStubC2* const stub = G1PostBarrierStubC2::create(node);
  for (RegSetIterator<Register> reg = preserve.begin(); *reg != noreg; ++reg) {
    stub->preserve(*reg);
  }
  g1_asm->g1_write_barrier_post_c2(masm, store_addr, new_val, rthread, tmp1, tmp2, stub);
}

%}

// TODO 8350865 (same applies to g1StoreLSpecialTwoOops)
// - Can we use an unbound register for src?
// - Do not set/overwrite barrier data here, also handle G1C2BarrierPostNotNull
// - Is the zero-extend really required in all the places?
// - Move this into the .m4?
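//
// The two StoreLSpecial rules below handle a 64-bit store whose long value
// embeds narrow oops: g1StoreLSpecialOneOop stores a long containing a single
// narrow oop at constant byte offset $off, g1StoreLSpecialTwoOops stores a
// long containing two adjacent narrow oops (at offsets 0 and 4). The embedded
// oop fields require G1 pre- and post-barriers, which are emitted around the
// plain str via write_barrier_pre()/write_barrier_post() above.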
instruct g1StoreLSpecialOneOop(indirect mem, iRegL_R11 src, immI off, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4, rFlagsReg cr)
%{
  predicate(UseG1GC);
  match(Set mem (StoreLSpecial mem (Binary src off)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, KILL cr);
  ins_cost(INSN_COST);
  format %{ "str $src, $mem\t# g1StoreLSpecialOneOop" %}
  ins_encode %{
    ((MachNode*)this)->set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);

    // Adjust address to point to narrow oop
    __ add($tmp4$$Register, $mem$$Register, $off$$constant);
    write_barrier_pre(masm, this,
                      $tmp4$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register, $tmp4$$Register) /* preserve */);

    __ str($src$$Register, $mem$$Register);

    // Shift long value to extract the narrow oop field value and zero-extend it
    __ lsr($src$$Register, $src$$Register, $off$$constant << LogBitsPerByte);
    __ ubfm($src$$Register, $src$$Register, 0, 31);

    write_barrier_post(masm, this,
                       $tmp4$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}

instruct g1StoreLSpecialTwoOops(indirect mem, iRegL_R11 src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegPNoSp tmp4, rFlagsReg cr)
%{
  predicate(UseG1GC);
  match(Set mem (StoreLSpecial mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, KILL cr);
  ins_cost(INSN_COST);
  format %{ "str $src, $mem\t# g1StoreLSpecialTwoOops" %}
  ins_encode %{
    ((MachNode*)this)->set_barrier_data(G1C2BarrierPre | G1C2BarrierPost);

    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    // Adjust address to point to the second narrow oop in the long value
    __ add($tmp4$$Register, $mem$$Register, 4);
    write_barrier_pre(masm, this,
                      $tmp4$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register, $tmp4$$Register) /* preserve */);

    __ str($src$$Register, $mem$$Register);

    // Zero-extend first narrow oop to long
    __ ubfm($tmp1$$Register, $src$$Register, 0, 31);

    // Shift long value to extract the second narrow oop field value
    __ lsr($src$$Register, $src$$Register, 32);

    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */,
                       RegSet::of($tmp1$$Register, $tmp4$$Register) /* preserve */);
    write_barrier_post(masm, this,
                       $tmp4$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}


// BEGIN This section of the file is automatically generated. Do not edit --------------

// This section is generated from g1_aarch64.m4


// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StoreP(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "str $src, $mem\t# ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ str($src$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr $src, $mem\t# ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ stlr($src$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StoreN(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "strw $src, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ strw($src$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPost) != 0) {
      if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
        __ decode_heap_oop($tmp1$$Register, $src$$Register);
      } else {
        __ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
      }
    }
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1StoreNVolatile(indirect mem, iRegN src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem src));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw $src, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    __ stlrw($src$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPost) != 0) {
      if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
        __ decode_heap_oop($tmp1$$Register, $src$$Register);
      } else {
        __ decode_heap_oop_not_null($tmp1$$Register, $src$$Register);
      }
    }
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1EncodePAndStoreN(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(INSN_COST);
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "strw $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ strw($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(istore_reg_mem);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1EncodePAndStoreNVolatile(indirect mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_releasing_store(n) && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreN mem (EncodeP src)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "encode_heap_oop $tmp1, $src\n\t"
            "stlrw $tmp1, $mem\t# compressed ptr" %}
  ins_encode %{
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $src$$Register) /* preserve */);
    if ((barrier_data() & G1C2BarrierPostNotNull) == 0) {
      __ encode_heap_oop($tmp1$$Register, $src$$Register);
    } else {
      __ encode_heap_oop_not_null($tmp1$$Register, $src$$Register);
    }
    __ stlrw($tmp1$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $src$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_class_memory);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $res = $mem, $oldval, $newval\t# ptr" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    // Pass $oldval to the pre-barrier (instead of loading from $mem), because
    // $oldval is the only value that can be overwritten.
    // The same holds for g1CompareAndSwapP and its Acq variant.
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               false /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $res = $mem, $oldval, $newval\t# narrow oop" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               true /* acquire */, true /* release */, false /* weak */, $res$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndSwapP(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $mem, $oldval, $newval\t# (ptr)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegP oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $mem, $oldval, $newval\t# (ptr)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $oldval$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndSwapN(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "cmpxchg $mem, $oldval, $newval\t# (narrow oop)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               false /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1CompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegN oldval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  effect(TEMP res, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop)\n\t"
            "cset $res, EQ" %}
  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($newval$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $oldval$$Register, $newval$$Register) /* preserve */,
                      RegSet::of($res$$Register) /* no_preserve */);
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::word,
               true /* acquire */, true /* release */, false /* weak */, noreg);
    __ cset($res$$Register, Assembler::EQ);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1GetAndSetP(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetP mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $preval$$Register /* pre_val (as a temporary register) */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchg($preval$$Register, $newval$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1GetAndSetPAcq(indirect mem, iRegP newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetP mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $preval$$Register /* pre_val (as a temporary register) */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgal($preval$$Register, $newval$$Register, $mem$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $newval$$Register /* new_val */,
                       $tmp1$$Register /* tmp1 */,
                       $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1GetAndSetN(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetN mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgw($preval$$Register, $newval$$Register, $mem$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1GetAndSetNAcq(indirect mem, iRegN newval, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, iRegNNoSp preval, rFlagsReg cr)
%{
  predicate(UseG1GC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() != 0);
  match(Set preval (GetAndSetN mem newval));
  effect(TEMP preval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $preval, $newval, [$mem]" %}
  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);
    write_barrier_pre(masm, this,
                      $mem$$Register /* obj */,
                      $tmp1$$Register /* pre_val */,
                      $tmp2$$Register /* tmp1 */,
                      $tmp3$$Register /* tmp2 */,
                      RegSet::of($mem$$Register, $preval$$Register, $newval$$Register) /* preserve */);
    __ atomic_xchgalw($preval$$Register, $newval$$Register, $mem$$Register);
    __ decode_heap_oop($tmp1$$Register, $newval$$Register);
    write_barrier_post(masm, this,
                       $mem$$Register /* store_addr */,
                       $tmp1$$Register /* new_val */,
                       $tmp2$$Register /* tmp1 */,
                       $tmp3$$Register /* tmp2 */);
  %}
  ins_pipe(pipe_serial);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1LoadP(iRegPNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, rFlagsReg cr)
%{
  // This instruction does not need an acquiring counterpart because it is only
  // used for reference loading (Reference::get()). The same holds for g1LoadN.
  predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadP mem));
  effect(TEMP dst, TEMP tmp1, TEMP tmp2, KILL cr);
  ins_cost(4 * INSN_COST);
  format %{ "ldr $dst, $mem\t# ptr" %}
  ins_encode %{
    __ ldr($dst$$Register, $mem$$Register);
    write_barrier_pre(masm, this,
                      noreg /* obj */,
                      $dst$$Register /* pre_val */,
                      $tmp1$$Register /* tmp1 */,
                      $tmp2$$Register /* tmp2 */);
  %}
  ins_pipe(iload_reg_mem);
%}

// This pattern is generated automatically from g1_aarch64.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct g1LoadN(iRegNNoSp dst, indirect mem, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3, rFlagsReg cr)
%{
  predicate(UseG1GC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadN mem));
  effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# compressed ptr" %}
  ins_encode %{
    __ ldrw($dst$$Register, $mem$$Register);
    if ((barrier_data() & G1C2BarrierPre) != 0) {
      __ decode_heap_oop($tmp1$$Register, $dst$$Register);
      write_barrier_pre(masm, this,
                        noreg /* obj */,
                        $tmp1$$Register /* pre_val */,
                        $tmp2$$Register /* tmp1 */,
                        $tmp3$$Register /* tmp2 */);
    }
  %}
  ins_pipe(iload_reg_mem);
%}

// END This section of the file is automatically generated. Do not edit --------------