/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahIUBarrier || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ cbz(count, done);

      // Is GC active?
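      // (When the destination is uninitialized there are no previous values
      // to snapshot, so only the load-reference part of the barrier
      // (HAS_FORWARDED) is relevant below; otherwise the runtime call is
      // also needed while marking is in progress.)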
      Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ ldrb(rscratch1, gc_state);
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
      } else {
        __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING);
        __ tst(rscratch1, rscratch2);
        __ br(Assembler::EQ, done);
      }

      __ push(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry), src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
      }
      __ pop(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register count, Register tmp, RegSet saved_regs) {
  if (is_oop) {
    gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp, saved_regs);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp1,
                                                           Register tmp2,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp1, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp1, in_progress);
  }
  __ cbzw(tmp1, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp1, index);                      // tmp := *index_adr
  __ cbz(tmp1, runtime);                    // tmp == 0?
                                            // If yes, goto runtime
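  // The instruction sequence below is, in effect, the following C sketch
  // (the SATB queue fills from the end of the buffer towards index 0, with
  // index kept as a byte offset):
  //
  //   index -= wordSize;              // claim the next slot
  //   *(buffer + index) = pre_val;    // record the previous value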
  __ sub(tmp1, tmp1, wordSize);             // tmp := tmp - wordSize
  __ str(tmp1, index);                      // *index_adr := tmp
  __ ldr(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);                 // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp1, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The below loads the mark word, checks if the lowest two bits are
  // set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing if the lowest two bits == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
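  //
  // A worked example (64-bit mark word; markWord::lock_mask_in_place and
  // markWord::marked_value are both 0b11):
  //   mark       = 0x0000000012345673   // low bits 0b11: forwarded
  //   ~mark      = 0xffffffffedcba98c   // low bits now 0b00
  //   ands       : (~mark & 0b11) == 0  -> EQ, fall through
  //   | 0b11     = 0xffffffffedcba98f
  //   ~result    = 0x0000000012345670   // forwardee, low bits cleared
  // A non-forwarded mark word has low bits != 0b11, so the inverted word
  // has a non-zero low bit, the NE branch is taken, and dst is untouched.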
  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);
  __ ands(zr, tmp, markWord::lock_mask_in_place);
  __ br(Assembler::NE, done);
  __ orr(tmp, tmp, markWord::marked_value);
  __ eon(dst, tmp, zr);
  __ bind(done);

  if (borrow_reg) {
    __ pop(RegSet::of(tmp), sp);
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != rscratch2, "need rscratch2");
  assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;

  __ enter(/*strip_ret_addr*/true);
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
  } else {
    Label lrb;
    __ tbnz(rscratch2, ShenandoahHeap::WEAK_ROOTS_BITPOS, lrb);
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
    __ bind(lrb);
  }

  // use r1 for load address
  Register result_dst = dst;
  if (dst == r1) {
    __ mov(rscratch1, dst);
    dst = rscratch1;
  }

  // Save r0 and r1, unless it is an output register
  RegSet to_save = RegSet::of(r0, r1) - result_dst;
  __ push(to_save, sp);
  __ lea(r1, load_addr);
  __ mov(r0, dst);

  // Test for in-cset
  if (is_strong) {
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ tbz(rscratch2, 0, not_cset);
  }

  __ push_call_clobbered_registers();
  if (is_strong) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ bind(not_cset);

  __ mov(result_dst, r0);
  __ pop(to_save, sp);

  __ bind(heap_stable);
  __ leave();
}

void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahIUBarrier) {
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, rscratch1, true, false);
    __ pop_call_clobbered_registers();
  }
}
//
// Arguments:
//
// Inputs:
//   src: oop location to load from, might be clobbered
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   rscratch1 (scratch reg)
//
// Alias:
//   dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      dst = rscratch1;
    }
    assert_different_registers(dst, src.base(), src.index());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mov(result_dst, dst);
      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter(/*strip_ret_addr*/true);
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp1 */,
                           tmp2 /* tmp2 */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  if (!ShenandoahHeap::heap()->mode()->is_generational()) {
    return;
  }

  ShenandoahBarrierSet* ctbs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = ctbs->card_table();

  __ lsr(obj, obj, CardTable::card_shift());

  assert(CardTable::dirty_card_val() == 0, "must be");

  __ load_byte_map_base(rscratch1);

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ ldrb(rscratch2, Address(obj, rscratch1));
    __ cbz(rscratch2, L_already_dirty);
    __ strb(zr, Address(obj, rscratch1));
    __ bind(L_already_dirty);
  } else {
    __ strb(zr, Address(obj, rscratch1));
  }
}
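
// The reference store path below applies, in order: the SATB pre-barrier to
// the previous value, the IU barrier to the new value (when enabled), the
// store itself, and finally the card mark (generational mode only).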
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               tmp3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1  /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
  } else {
    iu_barrier(masm, val, tmp1);
    // G1 barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
    store_check(masm, r3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ cbz(obj, done);

  assert(obj != rscratch2, "need rscratch2");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lea(rscratch2, gc_state);
  __ ldrb(rscratch2, Address(rscratch2));

  // Check for heap in evacuation phase
  __ tbnz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected. There are two
// races that must be addressed:
//  a) A parallel thread may mutate the contents of addr so that it points
//     to a different object. In this case, the CAS operation should fail.
//  b) A parallel thread may heal the contents of addr, replacing a
//     from-space pointer held in addr with the to-space pointer
//     representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals NULL
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success. If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers rscratch1, rscratch2
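//
// A minimal sketch of the false-negative case handled below: suppose
// object O has from-space copy F and to-space copy T, expected == T, and
// addr still holds F. Step 1's CAS fails (F != T). Step 2 resolves F's
// forward pointer, obtaining T, which matches expected. Step 3 then
// retries the CAS with F as the expected value; if addr still holds F,
// the new value is installed even though the first compare failed.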
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                bool acquire, bool release,
                                                bool is_cae,
                                                Register result) {
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;

  assert_different_registers(addr, expected, tmp1, tmp2);
  assert_different_registers(addr, new_val, tmp1, tmp2);

  Label step4, done;

  // There are two ways to reach this label. Initial entry into the
  // cmpxchg_oop code expansion starts at step1 (which is equivalent
  // to label step4). Additionally, in the rare case that four steps
  // are required to perform the requested operation, the fourth step
  // is the same as the first. On a second pass through step 1,
  // control may flow through step 2 on its way to failure. It will
  // not flow from step 2 to step 3 since we are assured that the
  // memory at addr no longer holds a from-space pointer.
  //
  // The comments that immediately follow the step4 label apply only
  // to the case in which control reaches this label by branch from
  // step 3.

  __ bind(step4);

  // Step 4. CAS has failed because the value most recently fetched
  // from addr is no longer the from-space pointer held in tmp2. If a
  // different thread replaced the in-memory value with its equivalent
  // to-space pointer, then CAS may still be able to succeed. The
  // value held in the expected register has not changed.
  //
  // It is extremely rare we reach this point. For this reason, the
  // implementation opts for smaller rather than potentially faster
  // code. Ultimately, smaller code for this rare case most likely
  // delivers higher overall throughput by enabling improved icache
  // performance.

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.
  //
  // No label required for step 1.

  __ cmpxchg(addr, expected, new_val, size, acquire, release, false, tmp2);
  // EQ flag set iff success. tmp2 holds value fetched.

  // If expected equals null but tmp2 does not equal null, the
  // following branches to done to report failure of CAS. If both
  // expected and tmp2 equal null, the following branches to done to
  // report success of CAS. There's no need for a special test of
  // expected equal to null.

  __ br(Assembler::EQ, done);
  // if CAS failed, fall through to step 2

  // Step 2. CAS has failed because the value held at addr does not
  // match expected. This may be a false negative because the value fetched
  // from addr (now held in tmp2) may be a from-space pointer to the
  // original copy of the same object referenced by the to-space pointer expected.
  //
  // To resolve this, it suffices to find the forward pointer associated
  // with the fetched value. If this matches expected, retry CAS with new
  // parameters. If this mismatches, then we have a legitimate
  // failure, and we're done.
  //
  // No need for a step2 label.

  // overwrite tmp1 with from-space pointer fetched from memory
  __ mov(tmp1, tmp2);

  if (is_narrow) {
    // Decode tmp1 in order to resolve its forward pointer
    __ decode_heap_oop(tmp1, tmp1);
  }
  resolve_forward_pointer(masm, tmp1);
  // Encode tmp1 to compare against expected.
  __ encode_heap_oop(tmp1, tmp1);

  // Does the forwarded value of the fetched from-space pointer match the
  // original value of expected? If tmp1 holds null, this comparison will
  // fail because we know from step1 that expected is not null. There is
  // no need for a separate test for tmp1 (the value originally held
  // in memory) equal to null.
  __ cmp(tmp1, expected);

  // If not, then the failure was legitimate and we're done.
  // Branching to done with NE condition denotes failure.
  __ br(Assembler::NE, done);

  // Fall through to step 3. No need for a step3 label.

  // Step 3. We've confirmed that the value originally held in memory
  // (now held in tmp2) pointed to the from-space version of the original
  // expected value. Try the CAS again with the from-space expected
  // value. If it now succeeds, we're good.
  //
  // Note: tmp2 holds the encoded from-space pointer that matches the to-space
  // object residing at expected. tmp2 is the new "expected".

  // Note that the macro implementation of __ cmpxchg cannot use the same register
  // tmp2 for result and expected since it overwrites result before it
  // compares result with expected.
  __ cmpxchg(addr, tmp2, new_val, size, acquire, release, false, noreg);
  // EQ flag set iff success. tmp2 holds value fetched, tmp1 (rscratch1) clobbered.

  // If the fetched value did not equal the new expected, this could
  // still be a false negative because some other thread may have
  // newly overwritten the memory value with its to-space equivalent.
  __ br(Assembler::NE, step4);

  if (is_cae) {
    // We're falling through to done to indicate success. Success
    // with is_cae is denoted by returning the value of expected as
    // result.
    __ mov(tmp2, expected);
  }

  __ bind(done);
  // At entry to done, the Z (EQ) flag is on iff the CAS
  // operation was successful. Additionally, if is_cae, tmp2 holds
  // the value most recently fetched from addr. In this case, success
  // is denoted by tmp2 matching expected.

  if (is_cae) {
    __ mov(result, tmp2);
  } else {
    __ cset(result, Assembler::EQ);
  }
}
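
// The card-range arithmetic in gen_write_ref_array_post_barrier below, as a
// worked example (assuming the HotSpot default of 512-byte cards, i.e.
// card_shift == 9, and 8-byte heap oops):
//   start = 0x10000, count = 4
//   end   = 0x10000 + (4 << 3) - 8 = 0x10018   (address of last element)
//   start card = 0x10000 >> 9 = 0x80, end card = 0x10018 >> 9 = 0x80
//   count = 0x80 - 0x80 = 0, so the loop dirties exactly one card.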
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register scratch, RegSet saved_regs) {
  if (!ShenandoahHeap::heap()->mode()->is_generational()) {
    return;
  }

  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = bs->card_table();

  Label L_loop, L_done;
  const Register end = count;

  __ cbz(count, L_done); // zero count - nothing to do

  __ lea(end, Address(start, count, Address::lsl(LogBytesPerHeapOop))); // end = start + count << LogBytesPerHeapOop
  __ sub(end, end, BytesPerHeapOop); // last element address to make inclusive
  __ lsr(start, start, CardTable::card_shift());
  __ lsr(end, end, CardTable::card_shift());
  __ sub(count, end, start); // number of bytes to copy

  __ load_byte_map_base(scratch);
  __ add(start, start, scratch);
  __ bind(L_loop);
  __ strb(zr, Address(start, count));
  __ subs(count, count, 1);
  __ br(Assembler::GE, L_loop);
  __ bind(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj  = stub->obj()->as_register();
  Register res  = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == r0, "result must arrive in r0");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(tmp2, Address(tmp2, tmp1));
    __ cbz(tmp2, *stub->continuation());
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
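  // (In non-generational mode only the young-marking bit is in use, so the
  // single-bit test below suffices; generational mode must also check the
  // old-marking bit.)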
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  if (!ShenandoahHeap::heap()->mode()->is_generational()) {
    __ tbz(tmp, ShenandoahHeap::YOUNG_MARKING_BITPOS, done);
  } else {
    __ mov(rscratch2, ShenandoahHeap::YOUNG_MARKING | ShenandoahHeap::OLD_MARKING);
    __ tst(tmp, rscratch2);
    __ br(Assembler::EQ, done);
  }

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, r0);
  __ load_parameter(1, r1);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  if (is_strong) {
    if (is_native) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    } else {
      if (UseCompressedOops) {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
      } else {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ epilogue();
}

#undef __

#endif // COMPILER1