/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
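    // The code below carves a save area of 8 * xmm_size bytes off the stack,
    // one slot per Java float argument register (xmm0..xmm7), so that a
    // runtime call cannot clobber in-flight fp arguments. Slot i holds xmmi
    // at offset i * xmm_size from rsp.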
    LP64_ONLY(assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call"));

    if (UseSSE >= 2) {
      const int xmm_size = wordSize * LP64_ONLY(2) NOT_LP64(4);
      __ subptr(rsp, xmm_size * 8);
      __ movdbl(Address(rsp, xmm_size * 0), xmm0);
      __ movdbl(Address(rsp, xmm_size * 1), xmm1);
      __ movdbl(Address(rsp, xmm_size * 2), xmm2);
      __ movdbl(Address(rsp, xmm_size * 3), xmm3);
      __ movdbl(Address(rsp, xmm_size * 4), xmm4);
      __ movdbl(Address(rsp, xmm_size * 5), xmm5);
      __ movdbl(Address(rsp, xmm_size * 6), xmm6);
      __ movdbl(Address(rsp, xmm_size * 7), xmm7);
    } else if (UseSSE >= 1) {
      const int xmm_size = wordSize * LP64_ONLY(1) NOT_LP64(2);
      __ subptr(rsp, xmm_size * 8);
      __ movflt(Address(rsp, xmm_size * 0), xmm0);
      __ movflt(Address(rsp, xmm_size * 1), xmm1);
      __ movflt(Address(rsp, xmm_size * 2), xmm2);
      __ movflt(Address(rsp, xmm_size * 3), xmm3);
      __ movflt(Address(rsp, xmm_size * 4), xmm4);
      __ movflt(Address(rsp, xmm_size * 5), xmm5);
      __ movflt(Address(rsp, xmm_size * 6), xmm6);
      __ movflt(Address(rsp, xmm_size * 7), xmm7);
    } else {
      __ push_FPU_state();
    }
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    if (UseSSE >= 2) {
      const int xmm_size = wordSize * LP64_ONLY(2) NOT_LP64(4);
      __ movdbl(xmm0, Address(rsp, xmm_size * 0));
      __ movdbl(xmm1, Address(rsp, xmm_size * 1));
      __ movdbl(xmm2, Address(rsp, xmm_size * 2));
      __ movdbl(xmm3, Address(rsp, xmm_size * 3));
      __ movdbl(xmm4, Address(rsp, xmm_size * 4));
      __ movdbl(xmm5, Address(rsp, xmm_size * 5));
      __ movdbl(xmm6, Address(rsp, xmm_size * 6));
      __ movdbl(xmm7, Address(rsp, xmm_size * 7));
      __ addptr(rsp, xmm_size * 8);
    } else if (UseSSE >= 1) {
      const int xmm_size = wordSize * LP64_ONLY(1) NOT_LP64(2);
      __ movflt(xmm0, Address(rsp, xmm_size * 0));
      __ movflt(xmm1, Address(rsp, xmm_size * 1));
      __ movflt(xmm2, Address(rsp, xmm_size * 2));
      __ movflt(xmm3, Address(rsp, xmm_size * 3));
      __ movflt(xmm4, Address(rsp, xmm_size * 4));
      __ movflt(xmm5, Address(rsp, xmm_size * 5));
      __ movflt(xmm6, Address(rsp, xmm_size * 6));
      __ movflt(xmm7, Address(rsp, xmm_size * 7));
      __ addptr(rsp, xmm_size * 8);
    } else {
      __ pop_FPU_state();
    }
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {
    if (ShenandoahCardBarrier) {
      bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
      bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
      bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);

      // We need to save the original element count because the array copy stub
      // will destroy the value and we need it for the card marking barrier.
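      // On x86_64 the generic (non-checkcast) copy stubs clobber 'count' (and,
      // for compressed-oop disjoint copies, 'dst'), so the value the epilogue
      // needs is stashed in r11 here and read back in arraycopy_epilogue().
      // On x86_32 the disjoint stub clobbers 'dst', which is saved in rdx.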
#ifdef _LP64
      if (!checkcast) {
        if (!obj_int) {
          // Save count for barrier
          __ movptr(r11, count);
        } else if (disjoint) {
          // Save dst in r11 in the disjoint case
          __ movq(r11, dst);
        }
      }
#else
      if (disjoint) {
        __ mov(rdx, dst); // save 'to'
      }
#endif
    }

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
#ifdef _LP64
      Register thread = r15_thread;
#else
      Register thread = rax;
      if (thread == src || thread == dst || thread == count) {
        thread = rbx;
      }
      if (thread == src || thread == dst || thread == count) {
        thread = rcx;
      }
      if (thread == src || thread == dst || thread == count) {
        thread = rdx;
      }
      __ push(thread);
      __ get_thread(thread);
#endif
      assert_different_registers(src, dst, count, thread);

      Label L_done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, L_done);

      // Avoid runtime call when not active.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, L_done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

#ifdef _LP64
      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop),
                        src, dst, count);
      } else
#endif
      {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(L_done);
      NOT_LP64(__ pop(thread);)
    }
  }

}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  if (ShenandoahCardBarrier && is_reference_type(type)) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
    bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
    Register tmp = rax;

#ifdef _LP64
    if (!checkcast) {
      if (!obj_int) {
        // Use the count saved by the prologue
        count = r11;
      } else if (disjoint) {
        // Use the saved dst in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
#else
    if (disjoint) {
      __ mov(dst, rdx); // restore 'to'
    }
#endif
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);             // tmp := *index_adr
  __ cmpptr(tmp, 0);                 // tmp == 0?
  __ jcc(Assembler::equal, runtime); // If yes, goto runtime

  __ subptr(tmp, wordSize);          // tmp := tmp - wordSize
  __ movptr(index, tmp);             // *index_adr := tmp
  __ addptr(tmp, buffer);            // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  NOT_LP64( __ push(thread); )

#ifdef _LP64
  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy the thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
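  // The runtime call being set up here is, conceptually:
  //   ShenandoahRuntime::write_ref_field_pre(pre_val /* c_rarg0 */, thread /* c_rarg1 */);
  // which is why pre_val must reach c_rarg0 before thread reaches c_rarg1.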
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }
#endif

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), LP64_ONLY(c_rarg0) NOT_LP64(pre_val), thread);
  }

  NOT_LP64( __ pop(thread); )

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
#ifdef _LP64
  Register thread = r15_thread;
#else
  Register thread = rcx;
  if (thread == dst) {
    thread = rbx;
  }
  __ push(thread);
  __ get_thread(thread);
#endif

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // The rest is saved with the optimized path

  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) LP64_ONLY(+4);
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
#ifdef _LP64
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // r12-r15 are callee saved in all calling conventions
#endif
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
#ifdef _LP64
  Register arg0 = c_rarg0, arg1 = c_rarg1;
#else
  Register arg0 = rdi, arg1 = rsi;
#endif
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

#ifdef _LP64
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9,  Address(rsp, (slot++) * wordSize));
  __ movptr(r8,  Address(rsp, (slot++) * wordSize));
#endif
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");

#ifndef _LP64
  __ pop(thread);
#endif
}

//
// Arguments:
//
// Inputs:
//   src:        oop location, might be clobbered
//   tmp1:       scratch register, might not be valid.
//
// Output:
//   dst:        oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
    assert_different_registers(dst, tmp1, tmp_thread);
    if (!thread->is_valid()) {
      thread = rdx;
    }
    NOT_LP64(__ get_thread(thread));
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    shenandoah_write_barrier_pre(masm /* masm */,
                                 noreg /* obj */,
                                 dst /* pre_val */,
                                 thread /* thread */,
                                 tmp1 /* tmp */,
                                 true /* tosca_live */,
                                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.

  ShenandoahBarrierSet* ctbs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = ctbs->card_table();

  __ shrptr(obj, CardTable::card_shift());

  Address card_addr;

  // The calculation for byte_map_base is as follows:
  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  // So this essentially converts an address to a displacement and it will
  // never need to be relocated. On 64-bit however the value may be too
  // large for a 32-bit displacement.
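  // Conceptually, the mark below performs:
  //   byte_map_base[uintptr_t(obj) >> card_shift] = dirty_card_val();
  // 'obj' already holds the shifted address at this point, so byte_map_base
  // can be used directly as the displacement.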
  intptr_t byte_map_base = (intptr_t) ct->byte_map_base();
  if (__ is_simm32(byte_map_base)) {
    card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
  } else {
    // By doing it as an ExternalAddress 'byte_map_base' could be converted to a rip-relative
    // displacement and done in a single instruction given favorable mapping and a
    // smarter version of as_Address. However, 'ExternalAddress' generates a relocation
    // entry and that entry is not properly handled by the relocation code.
    AddressLiteral cardtable((address)byte_map_base, relocInfo::none);
    Address index(noreg, obj, Address::times_1);
    card_addr = __ as_Address(ArrayAddress(cardtable, index), rscratch1);
  }

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool on_oop = is_reference_type(type);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

    assert_different_registers(val, tmp1, tmp2, tmp3, rthread);

#ifndef _LP64
    __ get_thread(rthread);
    InterpreterMacroAssembler* imasm = static_cast<InterpreterMacroAssembler*>(masm);
    imasm->save_bcp();
#endif

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /*masm*/,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   rthread /* thread */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }

    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    if (val != noreg) {
      if (ShenandoahCardBarrier) {
        store_check(masm, tmp1);
      }
    }
    NOT_LP64(imasm->restore_bcp());
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
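// The overall shape, mirroring the step comments in the body:
//   1. Try the plain CAS; if it succeeds, we are done.
//   2. On failure, filter out definitely-legitimate failures (null witness,
//      stable heap), then resolve the in-memory value through its forwarding
//      pointer to obtain the to-space copy.
//   3. Best-effort CAS of the to-space pointer back into memory.
//   4. Retry the original CAS, which can no longer fail on a from-space alias.
//   5. For plain CAS, materialize a boolean result; for CAE, the failure
//      witness is already in oldval.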
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else
#endif
  {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS had failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  // Before reaching the resolve sequence, see if we can avoid the whole shebang
  // with filters.

  // Filter: when offending in-memory value is null, the failure is definitely legitimate
  __ testptr(oldval, oldval);
  __ jcc(Assembler::zero, L_failure);

  // Filter: when heap is stable, the failure is definitely legitimate
#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = tmp2;
  __ get_thread(thread);
#endif
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else
#endif
  {
    __ movptr(tmp2, oldval);
  }

  // Decode offending in-memory value.
  // Test if-forwarded
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);     // When it is 00, then also not forwarded

  // Load and mask forwarding pointer
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);

#ifdef _LP64
  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }
#endif

  // Now we have the forwarded offender in tmp2.
  // Compare and if they don't match, we have legitimate failure
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.
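  // In pseudocode, the fixup attempted below is:
  //   CAS(addr, from_space_oldval, to_space_copy /* tmp2 */);
  // with the result deliberately ignored, per the comment above.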

#ifdef _LP64
  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }
#endif

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and memory pointer is to-space as well. Nothing is able to store
  // from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else
#endif
  {
    __ movptr(oldval, tmp2);
  }

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }

  // Step 5. If we need a boolean result out of CAS, set the flag appropriately
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count,
                                                                     Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = bs->card_table();
  intptr_t disp = (intptr_t) ct->byte_map_base();

  Label L_loop, L_done;
  const Register end = count;
  assert_different_registers(addr, end);

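  // The loop below dirties every card spanned by [addr, addr + count * oop_size):
  // 'end' is first turned into an inclusive card index relative to 'addr',
  // which is then recycled as the loop counter, walking down to zero.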
  // Zero count? Nothing to do.
  __ testl(count, count);
  __ jccb(Assembler::zero, L_done);

#ifdef _LP64
  __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
  __ subptr(end, BytesPerHeapOop);                  // end - 1 to make inclusive
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> cards count

  __ mov64(tmp, disp);
  __ addptr(addr, tmp);

  __ BIND(L_loop);
  __ movb(Address(addr, count, Address::times_1), 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);
#else
  __ lea(end, Address(addr, count, Address::times_ptr, -wordSize));
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> count

  __ BIND(L_loop);
  Address cardtable(addr, count, Address::times_1, disp);
  __ movb(cardtable, 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);
#endif

  __ BIND(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*) BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*) BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
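    // Conceptually: in_cset = cset_map[obj >> region_size_bytes_shift] != 0,
    // where cset_map is the byte-per-region table at
    // ShenandoahHeap::in_cset_fast_test_addr(). Objects outside the
    // collection set need no update and take the fast exit below.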
    // Check for object being in the collection set.
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
#ifdef _LP64
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
#else
    // On x86_32, C1 register allocator can give us the register without 8-bit support.
    // Do the full-register access and test to avoid compilation failures.
    __ movptr(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testptr(tmp2, 0xFF);
#endif
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store original value in the thread's buffer?
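  // As in satb_write_barrier_pre() above: the queue fills downward, 'index'
  // is the offset of the first used slot, and a zero index means the buffer
  // is full, so the runtime must flush it before we can record pre_val.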

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), rcx, thread);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

#ifdef _LP64
  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }
#else
  __ load_parameter(0, rax);
  __ load_parameter(1, rbx);
  if (is_strong) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), rax, rbx);
  } else if (is_weak) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), rax, rbx);
  } else {
    assert(is_phantom, "only remaining strength");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), rax, rbx);
  }
#endif

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1