/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#endif // COMPILER2

#define __ masm->

void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count, RegSet saved_regs) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  if (!dest_uninitialized) {
    Label done;
    Address in_progress(rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));

    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ ldrw(rscratch1, in_progress);
    } else {
      assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ ldrb(rscratch1, in_progress);
    }
    __ cbzw(rscratch1, done);

    __ push(saved_regs, sp);
    if (count == c_rarg0) {
      if (addr == c_rarg1) {
        // exactly backwards!!
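        // Swap the two arguments through rscratch1 so that neither is
        // clobbered before it has been read.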
        __ mov(rscratch1, c_rarg0);
        __ mov(c_rarg0, c_rarg1);
        __ mov(c_rarg1, rscratch1);
      } else {
        __ mov(c_rarg1, count);
        __ mov(c_rarg0, addr);
      }
    } else {
      __ mov(c_rarg0, addr);
      __ mov(c_rarg1, count);
    }
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
    }
    __ pop(saved_regs, sp);

    __ bind(done);
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register start, Register count, Register scratch, RegSet saved_regs) {
  __ push(saved_regs, sp);
  assert_different_registers(start, count, scratch);
  assert_different_registers(c_rarg0, count);
  __ mov(c_rarg0, start);
  __ mov(c_rarg1, count);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
  __ pop(saved_regs, sp);
}

static void generate_queue_test_and_insertion(MacroAssembler* masm, ByteSize index_offset, ByteSize buffer_offset, Label& runtime,
                                              const Register thread, const Register value, const Register temp1, const Register temp2) {
  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  __ ldr(temp1, Address(thread, in_bytes(index_offset)));   // temp1 := *(index address)
  __ cbz(temp1, runtime);                                   // jump to runtime if index == 0 (full buffer)
  // The buffer is not full, store value into it.
  __ sub(temp1, temp1, wordSize);                           // temp1 := next index
  __ str(temp1, Address(thread, in_bytes(index_offset)));   // *(index address) := next index
  __ ldr(temp2, Address(thread, in_bytes(buffer_offset)));  // temp2 := buffer address
  __ str(value, Address(temp2, temp1));                     // *(buffer address + next index) := value
}

static void generate_pre_barrier_fast_path(MacroAssembler* masm,
                                           const Register thread,
                                           const Register tmp1) {
  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp1, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp1, in_progress);
  }
}

static void generate_pre_barrier_slow_path(MacroAssembler* masm,
                                           const Register obj,
                                           const Register pre_val,
                                           const Register thread,
                                           const Register tmp1,
                                           const Register tmp2,
                                           Label& done,
                                           Label& runtime) {
  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }
  // Is the previous value null?
  __ cbz(pre_val, done);
  generate_queue_test_and_insertion(masm,
                                    G1ThreadLocalData::satb_mark_queue_index_offset(),
                                    G1ThreadLocalData::satb_mark_queue_buffer_offset(),
                                    runtime,
                                    thread, pre_val, tmp1, tmp2);
  __ b(done);
}

void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp1,
                                                 Register tmp2,
                                                 bool tosca_live,
                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
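  //
  // The pre-barrier itself records the previous value of the field
  // (loaded through obj, or already supplied in pre_val) in the
  // thread-local SATB queue while concurrent marking is active.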

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is not active (*(mark queue active address) == 0), jump to done
  __ cbzw(tmp1, done);
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, done, runtime);

  __ bind(runtime);

  __ push_call_clobbered_registers();

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(rfp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_call_clobbered_registers();

  __ bind(done);
}

static void generate_post_barrier_fast_path(MacroAssembler* masm,
                                            const Register store_addr,
                                            const Register new_val,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            bool new_val_may_be_null) {
  // Does store cross heap regions?
  __ eor(tmp1, store_addr, new_val);                     // tmp1 := store address ^ new value
  __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);   // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
  __ cbz(tmp1, done);
  // Crosses regions, storing null?
  if (new_val_may_be_null) {
    __ cbz(new_val, done);
  }
  // Storing region crossing non-null, is card young?
  __ lsr(tmp1, store_addr, CardTable::card_shift());     // tmp1 := card address relative to card table base
  __ load_byte_map_base(tmp2);                           // tmp2 := card table base address
  __ add(tmp1, tmp1, tmp2);                              // tmp1 := card address
  __ ldrb(tmp2, Address(tmp1));                          // tmp2 := card
  __ cmpw(tmp2, (int)G1CardTable::g1_young_card_val());  // tmp2 := card == young_card_val?
}

static void generate_post_barrier_slow_path(MacroAssembler* masm,
                                            const Register thread,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            Label& runtime) {
  __ membar(Assembler::StoreLoad);  // StoreLoad membar
  __ ldrb(tmp2, Address(tmp1));     // tmp2 := card
  __ cbzw(tmp2, done);
  // Storing a region crossing, non-null oop, card is clean.
  // Dirty card and log.
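  // dirty_card_val() is zero (see the STATIC_ASSERT below), so the card
  // can be dirtied by storing the zero register directly.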
  STATIC_ASSERT(CardTable::dirty_card_val() == 0);
  __ strb(zr, Address(tmp1));       // *(card address) := dirty_card_val
  generate_queue_test_and_insertion(masm,
                                    G1ThreadLocalData::dirty_card_queue_index_offset(),
                                    G1ThreadLocalData::dirty_card_queue_buffer_offset(),
                                    runtime,
                                    thread, tmp1, tmp2, rscratch1);
  __ b(done);
}

void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register thread,
                                                  Register tmp1,
                                                  Register tmp2) {
  assert(thread == rthread, "must be");
  assert_different_registers(store_addr, new_val, thread, tmp1, tmp2,
                             rscratch1);
  assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
         && tmp2 != noreg, "expecting a register");

  Label done;
  Label runtime;

  generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, done, true /* new_val_may_be_null */);
  // If card is young, jump to done
  __ br(Assembler::EQ, done);
  generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(store_addr);
  __ push(saved, sp);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
  __ pop(saved, sp);

  __ bind(done);
}

#if defined(COMPILER2)

static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
  SaveLiveRegisters save_registers(masm, stub);
  if (c_rarg0 != arg) {
    __ mov(c_rarg0, arg);
  }
  __ mov(c_rarg1, rthread);
  __ mov(rscratch1, runtime_path);
  __ blr(rscratch1);
}

void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
                                                    Register obj,
                                                    Register pre_val,
                                                    Register thread,
                                                    Register tmp1,
                                                    Register tmp2,
                                                    G1PreBarrierStubC2* stub) {
  assert(thread == rthread, "must be");
  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  stub->initialize_registers(obj, pre_val, thread, tmp1, tmp2);

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is active (*(mark queue active address) != 0), jump to stub (slow path)
  __ cbnzw(tmp1, *stub->entry());

  __ bind(*stub->continuation());
}

void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
                                                         G1PreBarrierStubC2* stub) const {
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  Label runtime;
  Register obj = stub->obj();
  Register pre_val = stub->pre_val();
  Register thread = stub->thread();
  Register tmp1 = stub->tmp1();
  Register tmp2 = stub->tmp2();

  __ bind(*stub->entry());
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, *stub->continuation(), runtime);

  __ bind(runtime);
  generate_c2_barrier_runtime_call(masm, stub, pre_val, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry));
  __ b(*stub->continuation());
}

void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
                                                     Register store_addr,
                                                     Register new_val,
                                                     Register thread,
                                                     Register tmp1,
                                                     Register tmp2,
                                                     G1PostBarrierStubC2* stub) {
  assert(thread == rthread, "must be");
  assert_different_registers(store_addr, new_val, thread, tmp1, tmp2,
                             rscratch1);
  assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
         && tmp2 != noreg, "expecting a register");

  stub->initialize_registers(thread, tmp1, tmp2);

  bool new_val_may_be_null = (stub->barrier_data() & G1C2BarrierPostNotNull) == 0;
  generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, *stub->continuation(), new_val_may_be_null);
  // If card is not young, jump to stub (slow path)
  __ br(Assembler::NE, *stub->entry());

  __ bind(*stub->continuation());
}

void G1BarrierSetAssembler::generate_c2_post_barrier_stub(MacroAssembler* masm,
                                                          G1PostBarrierStubC2* stub) const {
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  Label runtime;
  Register thread = stub->thread();
  Register tmp1 = stub->tmp1();  // tmp1 holds the card address.
  Register tmp2 = stub->tmp2();
  assert(stub->tmp3() == noreg, "not needed on this platform");

  __ bind(*stub->entry());
  generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, *stub->continuation(), runtime);

  __ bind(runtime);
  generate_c2_barrier_runtime_call(masm, stub, tmp1, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry));
  __ b(*stub->continuation());
}

#endif // COMPILER2

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp2) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  if (on_oop && on_reference) {
    // LR is live. It must be saved around calls.
    __ enter(/*strip_ret_addr*/true); // barrier may call runtime
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm /* masm */,
                         noreg /* obj */,
                         dst /* pre_val */,
                         rthread /* thread */,
                         tmp1 /* tmp1 */,
                         tmp2 /* tmp2 */,
                         true /* tosca_live */,
                         true /* expand_call */);
    __ leave();
  }
}

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

  g1_write_barrier_pre(masm,
                       tmp3 /* obj */,
                       tmp2 /* pre_val */,
                       rthread /* thread */,
                       tmp1 /* tmp1 */,
                       rscratch2 /* tmp2 */,
                       val != noreg /* tosca_live */,
                       false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
  } else {
    // G1 barrier needs uncompressed oop for region cross check.
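    // store_at encodes val in place when compressed oops are in use, so
    // preserve an uncompressed copy in rscratch2 for the post barrier.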
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
    g1_write_barrier_post(masm,
                          tmp3 /* store_addr */,
                          new_val /* new_val */,
                          rthread /* thread */,
                          tmp1 /* tmp1 */,
                          tmp2 /* tmp2 */);
  }
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ cbz(new_val_reg, *stub->continuation());
  ce->store_parameter(stub->addr()->as_pointer_register(), 0);
  __ far_call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp, in_progress);
  }
  __ cbzw(tmp, done);

  // Can we store original value in the thread's buffer?
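  // A queue index of zero means the SATB buffer is full; in that case
  // fall back to the runtime.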
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_post_barrier", false);

  // arg0: store_address
  Address store_addr(rfp, 2*BytesPerWord);

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();

  Label done;
  Label runtime;

  // At this point we know new_value is non-null and that it crosses regions.
  // Must check to see if the card is already dirty.

  const Register thread = rthread;

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  const Register card_offset = rscratch2;
  // LR is free here, so we can use it to hold the byte_map_base.
  const Register byte_map_base = lr;

  assert_different_registers(card_offset, byte_map_base, rscratch1);

  __ load_parameter(0, card_offset);
  __ lsr(card_offset, card_offset, CardTable::card_shift());
  __ load_byte_map_base(byte_map_base);
  __ ldrb(rscratch1, Address(byte_map_base, card_offset));
  __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
  __ br(Assembler::EQ, done);

  assert((int)CardTable::dirty_card_val() == 0, "must be 0");

  __ membar(Assembler::StoreLoad);
  __ ldrb(rscratch1, Address(byte_map_base, card_offset));
  __ cbzw(rscratch1, done);

  // Storing region crossing non-null, card is clean.
  // Dirty card and log.
  __ strb(zr, Address(byte_map_base, card_offset));

  // Convert card offset into an address in card_addr
  Register card_addr = card_offset;
  __ add(card_addr, byte_map_base, card_addr);

  __ ldr(rscratch1, queue_index);
  __ cbz(rscratch1, runtime);
  __ sub(rscratch1, rscratch1, wordSize);
  __ str(rscratch1, queue_index);

  // Reuse LR to hold buffer_addr
  const Register buffer_addr = lr;

  __ ldr(buffer_addr, buffer);
  __ str(card_addr, Address(buffer_addr, rscratch1));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);
  __ epilogue();
}

#undef __

#endif // COMPILER1