/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#endif // COMPILER2

#define __ masm->

void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count, RegSet saved_regs) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  if (!dest_uninitialized) {
    Label done;
    Address in_progress(rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));

    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ ldrw(rscratch1, in_progress);
    } else {
      assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ ldrb(rscratch1, in_progress);
    }
    __ cbzw(rscratch1, done);

    __ push(saved_regs, sp);
    if (count == c_rarg0) {
      if (addr == c_rarg1) {
        // exactly backwards!!
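        // count is already in c_rarg0 and addr is in c_rarg1: swap them
        // through rscratch1 so neither value is clobbered before it is read.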
        __ mov(rscratch1, c_rarg0);
        __ mov(c_rarg0, c_rarg1);
        __ mov(c_rarg1, rscratch1);
      } else {
        __ mov(c_rarg1, count);
        __ mov(c_rarg0, addr);
      }
    } else {
      __ mov(c_rarg0, addr);
      __ mov(c_rarg1, count);
    }
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
    }
    __ pop(saved_regs, sp);

    __ bind(done);
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register start, Register count, Register scratch, RegSet saved_regs) {
  __ push(saved_regs, sp);
  assert_different_registers(start, count, scratch);
  assert_different_registers(c_rarg0, count);
  __ mov(c_rarg0, start);
  __ mov(c_rarg1, count);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
  __ pop(saved_regs, sp);
}

static void generate_queue_test_and_insertion(MacroAssembler* masm, ByteSize index_offset, ByteSize buffer_offset, Label& runtime,
                                              const Register thread, const Register value, const Register temp1, const Register temp2) {
  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  __ ldr(temp1, Address(thread, in_bytes(index_offset)));   // temp1 := *(index address)
  __ cbz(temp1, runtime);                                   // jump to runtime if index == 0 (full buffer)
  // The buffer is not full, store value into it.
  __ sub(temp1, temp1, wordSize);                           // temp1 := next index
  __ str(temp1, Address(thread, in_bytes(index_offset)));   // *(index address) := next index
  __ ldr(temp2, Address(thread, in_bytes(buffer_offset)));  // temp2 := buffer address
  __ str(value, Address(temp2, temp1));                     // *(buffer address + next index) := value
}

static void generate_pre_barrier_fast_path(MacroAssembler* masm,
                                           const Register thread,
                                           const Register tmp1) {
  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp1, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp1, in_progress);
  }
}

static void generate_pre_barrier_slow_path(MacroAssembler* masm,
                                           const Register obj,
                                           const Register pre_val,
                                           const Register thread,
                                           const Register tmp1,
                                           const Register tmp2,
                                           Label& done,
                                           Label& runtime) {
  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }
  // Is the previous value null?
  __ cbz(pre_val, done);
  generate_queue_test_and_insertion(masm,
                                    G1ThreadLocalData::satb_mark_queue_index_offset(),
                                    G1ThreadLocalData::satb_mark_queue_buffer_offset(),
                                    runtime,
                                    thread, pre_val, tmp1, tmp2);
  __ b(done);
}

void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp1,
                                                 Register tmp2,
                                                 bool tosca_live,
                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
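  //
  // The pre-barrier records the previous value of the field in the
  // thread's SATB buffer so that concurrent marking preserves the
  // snapshot-at-the-beginning invariant.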

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is not active (*(mark queue active address) == 0), jump to done
  __ cbzw(tmp1, done);
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, done, runtime);

  __ bind(runtime);

  __ push_call_clobbered_registers();

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_call_clobbered_registers();

  __ bind(done);
}

static void generate_post_barrier_fast_path(MacroAssembler* masm,
                                            const Register store_addr,
                                            const Register new_val,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            bool new_val_may_be_null) {
  // Does store cross heap regions?
  __ eor(tmp1, store_addr, new_val);                     // tmp1 := store address ^ new value
  __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);   // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
  __ cbz(tmp1, done);
  // Crosses regions, storing null?
  if (new_val_may_be_null) {
    __ cbz(new_val, done);
  }
  // Storing region crossing non-null, is card young?
  __ lsr(tmp1, store_addr, CardTable::card_shift());     // tmp1 := card address relative to card table base
  __ load_byte_map_base(tmp2);                           // tmp2 := card table base address
  __ add(tmp1, tmp1, tmp2);                              // tmp1 := card address
  __ ldrb(tmp2, Address(tmp1));                          // tmp2 := card
  __ cmpw(tmp2, (int)G1CardTable::g1_young_card_val());  // tmp2 := card == young_card_val?
}

static void generate_post_barrier_slow_path(MacroAssembler* masm,
                                            const Register thread,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            Label& runtime) {
  __ membar(Assembler::StoreLoad);  // StoreLoad membar
  __ ldrb(tmp2, Address(tmp1));     // tmp2 := card
  __ cbzw(tmp2, done);
  // Storing a region crossing, non-null oop, card is clean.
  // Dirty card and log.
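  // dirty_card_val() is zero (asserted below), so the card can be dirtied
  // simply by storing the zero register.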
  STATIC_ASSERT(CardTable::dirty_card_val() == 0);
  __ strb(zr, Address(tmp1));       // *(card address) := dirty_card_val
  generate_queue_test_and_insertion(masm,
                                    G1ThreadLocalData::dirty_card_queue_index_offset(),
                                    G1ThreadLocalData::dirty_card_queue_buffer_offset(),
                                    runtime,
                                    thread, tmp1, tmp2, rscratch1);
  __ b(done);
}

void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register thread,
                                                  Register tmp1,
                                                  Register tmp2) {
  assert(thread == rthread, "must be");
  assert_different_registers(store_addr, new_val, thread, tmp1, tmp2,
                             rscratch1);
  assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
         && tmp2 != noreg, "expecting a register");

  Label done;
  Label runtime;

  generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, done, true /* new_val_may_be_null */);
  // If card is young, jump to done
  __ br(Assembler::EQ, done);
  generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);

  __ bind(runtime);
  // Save the live input values.
  RegSet saved = RegSet::of(store_addr);
  __ push(saved, sp);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
  __ pop(saved, sp);

  __ bind(done);
}

#if defined(COMPILER2)

static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
  SaveLiveRegisters save_registers(masm, stub);
  if (c_rarg0 != arg) {
    __ mov(c_rarg0, arg);
  }
  __ mov(c_rarg1, rthread);
  __ mov(rscratch1, runtime_path);
  __ blr(rscratch1);
}

void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
                                                    Register obj,
                                                    Register pre_val,
                                                    Register thread,
                                                    Register tmp1,
                                                    Register tmp2,
                                                    G1PreBarrierStubC2* stub) {
  assert(thread == rthread, "must be");
  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
  __ cbnzw(tmp1, *stub->entry());

  __ bind(*stub->continuation());
}

void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
                                                         G1PreBarrierStubC2* stub) const {
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  Label runtime;
  Register obj = stub->obj();
  Register pre_val = stub->pre_val();
  Register thread = stub->thread();
  Register tmp1 = stub->tmp1();
  Register tmp2 = stub->tmp2();

  __ bind(*stub->entry());
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, *stub->continuation(), runtime);

  __ bind(runtime);
  generate_c2_barrier_runtime_call(masm, stub, pre_val, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry));
  __ b(*stub->continuation());
}

void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
                                                     Register store_addr,
                                                     Register new_val,
                                                     Register thread,
                                                     Register tmp1,
                                                     Register tmp2,
                                                     G1PostBarrierStubC2* stub) {
  assert(thread == rthread, "must be");
  assert_different_registers(store_addr, new_val, thread, tmp1, tmp2,
                             rscratch1);
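  // rscratch1 must differ from the inputs because the slow path consumes it
  // as a temporary (see generate_queue_test_and_insertion) and as the call
  // target register for the runtime path.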
  assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
         && tmp2 != noreg, "expecting a register");

  stub->initialize_registers(thread, tmp1, tmp2);

  bool new_val_may_be_null = (stub->barrier_data() & G1C2BarrierPostNotNull) == 0;
  generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, *stub->continuation(), new_val_may_be_null);
  // If card is not young, jump to stub (slow path)
  __ br(Assembler::NE, *stub->entry());

  __ bind(*stub->continuation());
}

void G1BarrierSetAssembler::generate_c2_post_barrier_stub(MacroAssembler* masm,
                                                          G1PostBarrierStubC2* stub) const {
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  Label runtime;
  Register thread = stub->thread();
  Register tmp1 = stub->tmp1();  // tmp1 holds the card address.
  Register tmp2 = stub->tmp2();
  assert(stub->tmp3() == noreg, "not needed on this platform");

  __ bind(*stub->entry());
  generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, *stub->continuation(), runtime);

  __ bind(runtime);
  generate_c2_barrier_runtime_call(masm, stub, tmp1, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry));
  __ b(*stub->continuation());
}

#endif // COMPILER2

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp2) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  if (on_oop && on_reference) {
    // LR is live. It must be saved around calls.
    __ enter(/*strip_ret_addr*/true); // barrier may call runtime
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm /* masm */,
                         noreg /* obj */,
                         dst /* pre_val */,
                         rthread /* thread */,
                         tmp1 /* tmp1 */,
                         tmp2 /* tmp2 */,
                         true /* tosca_live */,
                         true /* expand_call */);
    __ leave();
  }
}

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

  g1_write_barrier_pre(masm,
                       tmp3 /* obj */,
                       tmp2 /* pre_val */,
                       rthread /* thread */,
                       tmp1 /* tmp1 */,
                       rscratch2 /* tmp2 */,
                       val != noreg /* tosca_live */,
                       false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
  } else {
    // G1 barrier needs uncompressed oop for region cross check.
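    // The post-barrier fast path XORs the full store address with the full
    // new value, so keep an uncompressed copy here: store_at() may encode
    // val in place when compressed oops are in use.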
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
    g1_write_barrier_post(masm,
                          tmp3 /* store_addr */,
                          new_val /* new_val */,
                          rthread /* thread */,
                          tmp1 /* tmp1 */,
                          tmp2 /* tmp2 */);
  }
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ cbz(new_val_reg, *stub->continuation());
  ce->store_parameter(stub->addr()->as_pointer_register(), 0);
  __ far_call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp, in_progress);
  }
  __ cbzw(tmp, done);

  // Can we store the original value in the thread's buffer?
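  // (The index counts down in bytes; an index of zero means the buffer is
  // full and must be handed to the runtime.)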
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_post_barrier", false);

  // arg0: store_address
  Address store_addr(rfp, 2 * BytesPerWord);

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();

  Label done;
  Label runtime;

  // At this point we know new_value is non-null and the new_value crosses regions.
  // Must check to see if card is already dirty.

  const Register thread = rthread;

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  const Register card_offset = rscratch2;
  // LR is free here, so we can use it to hold the byte_map_base.
  const Register byte_map_base = lr;

  assert_different_registers(card_offset, byte_map_base, rscratch1);

  __ load_parameter(0, card_offset);
  __ lsr(card_offset, card_offset, CardTable::card_shift());
  __ load_byte_map_base(byte_map_base);
  __ ldrb(rscratch1, Address(byte_map_base, card_offset));
  __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
  __ br(Assembler::EQ, done);

  assert((int)CardTable::dirty_card_val() == 0, "must be 0");

  __ membar(Assembler::StoreLoad);
  __ ldrb(rscratch1, Address(byte_map_base, card_offset));
  __ cbzw(rscratch1, done);

  // Storing region crossing non-null, card is clean.
  // Dirty card and log.
  __ strb(zr, Address(byte_map_base, card_offset));

  // Convert card offset into an address in card_addr.
  Register card_addr = card_offset;
  __ add(card_addr, byte_map_base, card_addr);

  __ ldr(rscratch1, queue_index);
  __ cbz(rscratch1, runtime);
  __ sub(rscratch1, rscratch1, wordSize);
  __ str(rscratch1, queue_index);

  // Reuse LR to hold buffer_addr.
  const Register buffer_addr = lr;

  __ ldr(buffer_addr, buffer);
  __ str(card_addr, Address(buffer_addr, rscratch1));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);
  __ epilogue();
}

#undef __

#endif // COMPILER1