/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (!dest_uninitialized) {
    Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
#ifndef _LP64
    __ push(thread);
    __ get_thread(thread);
#endif

    Label filtered;
    Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
    // Is marking active?
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ cmpl(in_progress, 0);
    } else {
      assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ cmpb(in_progress, 0);
    }

    NOT_LP64(__ pop(thread);)

    __ jcc(Assembler::equal, filtered);

    __ push_call_clobbered_registers(false /* save_fpu */);
#ifdef _LP64
    if (count == c_rarg0) {
      if (addr == c_rarg1) {
        // exactly backwards!!
        __ xchgptr(c_rarg1, c_rarg0);
      } else {
        __ movptr(c_rarg1, count);
        __ movptr(c_rarg0, addr);
      }
    } else {
      __ movptr(c_rarg0, addr);
      __ movptr(c_rarg1, count);
    }
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
    }
#else
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry),
                    addr, count);
#endif
    __ pop_call_clobbered_registers(false /* save_fpu */);

    __ bind(filtered);
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, Register tmp) {
  __ push_call_clobbered_registers(false /* save_fpu */);
#ifdef _LP64
  if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
    assert_different_registers(c_rarg1, addr);
    __ mov(c_rarg1, count);
    __ mov(c_rarg0, addr);
  } else {
    assert_different_registers(c_rarg0, count);
    __ mov(c_rarg0, addr);
    __ mov(c_rarg1, count);
  }
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
#else
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry),
                  addr, count);
#endif
  __ pop_call_clobbered_registers(false /* save_fpu */);
}

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = is_reference_type(type);
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (on_oop && on_reference) {
    Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);

#ifndef _LP64
    // Work around the x86_32 bug that only manifests with Loom for some reason.
    // MacroAssembler::resolve_weak_handle calls this barrier with tmp_thread == noreg.
    if (thread == noreg) {
      if (dst != rcx && tmp1 != rcx) {
        thread = rcx;
      } else if (dst != rdx && tmp1 != rdx) {
        thread = rdx;
      } else if (dst != rdi && tmp1 != rdi) {
        thread = rdi;
      }
    }
    assert_different_registers(dst, tmp1, thread);
    __ push(thread);
    __ get_thread(thread);
#endif

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm /* masm */,
                         noreg /* obj */,
                         dst /* pre_val */,
                         thread /* thread */,
                         tmp1 /* tmp */,
                         true /* tosca_live */,
                         true /* expand_call */);

#ifndef _LP64
    __ pop(thread);
#endif
  }
}

void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp,
                                                 bool tosca_live,
                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
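  //
  // The barrier itself: if concurrent marking is not active everything is
  // filtered out. Otherwise the previous value is loaded (when obj is given),
  // null-filtered, and enqueued in the thread-local SATB buffer; only when
  // that buffer is full do we call into the runtime.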

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ cmpl(in_progress, 0);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ cmpb(in_progress, 0);
  }
  __ jcc(Assembler::equal, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);                // tmp := *index_adr
  __ cmpptr(tmp, 0);                    // tmp == 0?
  __ jcc(Assembler::equal, runtime);    // If yes, goto runtime

  __ subptr(tmp, wordSize);             // tmp := tmp - wordSize
  __ movptr(index, tmp);                // *index_adr := tmp
  __ addptr(tmp, buffer);               // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  // Determine and save the live input values
  __ push_call_clobbered_registers();

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
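  //
  // Either way the runtime entry takes (pre_val, thread): on 64-bit the
  // arguments are placed in c_rarg0/c_rarg1, on 32-bit they are pushed
  // on the stack.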

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    if (c_rarg0 != pre_val) {
      __ mov(c_rarg0, pre_val);
    }
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_call_clobbered_registers();

  __ bind(done);
}

void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register thread,
                                                  Register tmp,
                                                  Register tmp2) {
#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  CardTableBarrierSet* ct =
    barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  Label done;
  Label runtime;

  // Does store cross heap regions?

  __ movptr(tmp, store_addr);
  __ xorptr(tmp, new_val);
  __ shrptr(tmp, G1HeapRegion::LogOfHRGrainBytes);
  __ jcc(Assembler::equal, done);

  // crosses regions, storing null?

  __ cmpptr(new_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // storing region crossing non-null, is card already dirty?

  const Register card_addr = tmp;
  const Register cardtable = tmp2;

  __ movptr(card_addr, store_addr);
  __ shrptr(card_addr, CardTable::card_shift());
  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
  // a valid address and therefore is not properly handled by the relocation code.
  __ movptr(cardtable, (intptr_t)ct->card_table()->byte_map_base());
  __ addptr(card_addr, cardtable);

  __ cmpb(Address(card_addr, 0), G1CardTable::g1_young_card_val());
  __ jcc(Assembler::equal, done);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ cmpb(Address(card_addr, 0), G1CardTable::dirty_card_val());
  __ jcc(Assembler::equal, done);

  // storing a region crossing, non-null oop, card is clean.
  // dirty card and log.

  __ movb(Address(card_addr, 0), G1CardTable::dirty_card_val());

  // The code below assumes that buffer index is pointer sized.
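  // The index is a byte offset into the buffer: a zero index means the buffer
  // is full, so we take the runtime slow path; otherwise we bump it down by
  // one word and store the card address at that slot.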
  STATIC_ASSERT(in_bytes(G1DirtyCardQueue::byte_width_of_index()) == sizeof(intptr_t));

  __ movptr(tmp2, queue_index);
  __ testptr(tmp2, tmp2);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp2, wordSize);
  __ movptr(queue_index, tmp2);
  __ addptr(tmp2, buffer);
  __ movptr(Address(tmp2, 0), card_addr);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(store_addr NOT_LP64(COMMA thread));
  __ push_set(saved);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
  __ pop_set(saved);

  __ bind(done);
}

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;

  bool needs_pre_barrier = as_normal;
  bool needs_post_barrier = val != noreg && in_heap;

  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  // flatten object address if needed
  // We do it regardless of precise because we need the registers
  if (dst.index() == noreg && dst.disp() == 0) {
    if (dst.base() != tmp1) {
      __ movptr(tmp1, dst.base());
    }
  } else {
    __ lea(tmp1, dst);
  }

#ifndef _LP64
  InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
#endif

  NOT_LP64(__ get_thread(rcx));
  NOT_LP64(imasm->save_bcp());

  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm /*masm*/,
                         tmp1 /* obj */,
                         tmp2 /* pre_val */,
                         rthread /* thread */,
                         tmp3 /* tmp */,
                         val != noreg /* tosca_live */,
                         false /* expand_call */);
  }
  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
  } else {
    Register new_val = val;
    if (needs_post_barrier) {
      // G1 barrier needs uncompressed oop for region cross check.
      if (UseCompressedOops) {
        new_val = tmp2;
        __ movptr(new_val, val);
      }
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    if (needs_post_barrier) {
      g1_write_barrier_post(masm /*masm*/,
                            tmp1 /* store_adr */,
                            new_val /* new_val */,
                            rthread /* thread */,
                            tmp3 /* tmp */,
                            tmp2 /* tmp2 */);
    }
  }
  NOT_LP64(imasm->restore_bcp());
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
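  //
  // The stub null-filters pre_val, passes it to the C1 pre-barrier runtime
  // blob via store_parameter, and then jumps back to the continuation.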

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ cmpptr(new_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->addr()->as_pointer_register(), 0);
  __ call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Generated code assumes that buffer index is pointer sized.
  STATIC_ASSERT(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t));

  __ prologue("g1_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ cmpl(queue_active, 0);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ cmpb(queue_active, 0);
  }
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
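  // Same enqueue logic as in g1_write_barrier_pre: a zero index means the
  // SATB buffer is full and we must call into the runtime instead.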

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ push_call_clobbered_registers();

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), rcx, thread);

  __ pop_call_clobbered_registers();

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("g1_post_barrier", false);

  CardTableBarrierSet* ct =
    barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  Label done;
  Label enqueued;
  Label runtime;

  // At this point we know new_value is non-null and the new_value crosses regions.
  // Must check to see if card is already dirty

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  __ push(rax);
  __ push(rcx);

  const Register cardtable = rax;
  const Register card_addr = rcx;

  __ load_parameter(0, card_addr);
  __ shrptr(card_addr, CardTable::card_shift());
  // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
  // a valid address and therefore is not properly handled by the relocation code.
  __ movptr(cardtable, (intptr_t)ct->card_table()->byte_map_base());
  __ addptr(card_addr, cardtable);

  NOT_LP64(__ get_thread(thread);)

  __ cmpb(Address(card_addr, 0), G1CardTable::g1_young_card_val());
  __ jcc(Assembler::equal, done);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ cmpb(Address(card_addr, 0), CardTable::dirty_card_val());
  __ jcc(Assembler::equal, done);

  // storing region crossing non-null, card is clean.
  // dirty card and log.

  __ movb(Address(card_addr, 0), CardTable::dirty_card_val());

  const Register tmp = rdx;
  __ push(rdx);

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);
  __ movptr(Address(tmp, 0), card_addr);
  __ jmp(enqueued);

  __ bind(runtime);
  __ push_call_clobbered_registers();

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);

  __ pop_call_clobbered_registers();

  __ bind(enqueued);
  __ pop(rdx);

  __ bind(done);
  __ pop(rcx);
  __ pop(rax);

  __ epilogue();
}

#undef __

#endif // COMPILER1