/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

#define __ masm->

static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
    assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call");

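    // Each save slot is two words (16 bytes) wide, but only the low 64 bits
    // of each XMM register can carry a live java fp argument, so saving and
    // restoring with movdbl is sufficient.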
    const int xmm_size = wordSize * 2;
    __ subptr(rsp, xmm_size * 8);
    __ movdbl(Address(rsp, xmm_size * 0), xmm0);
    __ movdbl(Address(rsp, xmm_size * 1), xmm1);
    __ movdbl(Address(rsp, xmm_size * 2), xmm2);
    __ movdbl(Address(rsp, xmm_size * 3), xmm3);
    __ movdbl(Address(rsp, xmm_size * 4), xmm4);
    __ movdbl(Address(rsp, xmm_size * 5), xmm5);
    __ movdbl(Address(rsp, xmm_size * 6), xmm6);
    __ movdbl(Address(rsp, xmm_size * 7), xmm7);
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    const int xmm_size = wordSize * 2;
    __ movdbl(xmm0, Address(rsp, xmm_size * 0));
    __ movdbl(xmm1, Address(rsp, xmm_size * 1));
    __ movdbl(xmm2, Address(rsp, xmm_size * 2));
    __ movdbl(xmm3, Address(rsp, xmm_size * 3));
    __ movdbl(xmm4, Address(rsp, xmm_size * 4));
    __ movdbl(xmm5, Address(rsp, xmm_size * 5));
    __ movdbl(xmm6, Address(rsp, xmm_size * 6));
    __ movdbl(xmm7, Address(rsp, xmm_size * 7));
    __ addptr(rsp, xmm_size * 8);
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {
    if (ShenandoahCardBarrier) {
      bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
      bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
      bool obj_int = (type == T_OBJECT) && UseCompressedOops;

      // We need to save the original element count because the array copy stub
      // will destroy the value and we need it for the card marking barrier.
      if (!checkcast) {
        if (!obj_int) {
          // Save count for barrier
          __ movptr(r11, count);
        } else if (disjoint) {
          // Save dst in r11 in the disjoint case
          __ movq(r11, dst);
        }
      }
    }

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
      Register thread = r15_thread;
      assert_different_registers(src, dst, count, thread);

      Label L_done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, L_done);

      // Avoid runtime call when not active.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
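      // An uninitialized destination holds no previous values to log for
      // SATB, so in that case the runtime call is only needed while the heap
      // has forwarded objects; otherwise active marking must be checked too.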
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, L_done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(L_done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  if (ShenandoahCardBarrier && is_reference_type(type)) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
    bool obj_int = (type == T_OBJECT) && UseCompressedOops;
    Register tmp = rax;

    if (!checkcast) {
      if (!obj_int) {
        // Save count for barrier
        count = r11;
      } else if (disjoint) {
        // Use the saved dst in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register tmp,
                                                 bool tosca_live,
                                                 bool expand_call) {
  assert(ShenandoahSATBBarrier, "Should be checked by caller");

  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  const Register thread = r15_thread;

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);                   // tmp := *index_adr
  __ cmpptr(tmp, 0);                       // tmp == 0?
  __ jcc(Assembler::equal, runtime);       // If yes, goto runtime

  __ subptr(tmp, wordSize);                // tmp := tmp - wordSize
  __ movptr(index, tmp);                   // *index_adr := tmp
  __ addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) {
    __ push(rax);
  }

  if (obj != noreg && obj != rax) {
    __ push(obj);
  }

  if (pre_val != rax) {
    __ push(pre_val);
  }

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), 1);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
  }

  // restore the live input values
  if (pre_val != rax) {
    __ pop(pre_val);
  }

  if (obj != noreg && obj != rax) {
    __ pop(obj);
  }

  if (tosca_live) {
    __ pop(rax);
  }

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;
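  // Off-heap (native) locations always hold uncompressed oops, even when
  // compressed oops are enabled for the heap, hence !is_native above.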

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
  Register thread = r15_thread;

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
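    // The in_cset_fast_test table keeps one byte per heap region: shifting
    // the oop right by the region size shift yields its region index, which
    // is then used to index the table.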
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // The rest (the general-purpose registers) is saved manually below.

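  // Saved below: rcx, rdx, rdi, rsi and r8-r11, rax unless it doubles as dst,
  // plus the 16 APX registers r16-r31 when APX is enabled.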
  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // Save APX extended registers r16-r31 if enabled
  if (UseAPX) {
    __ movptr(Address(rsp, (--slot) * wordSize), r16);
    __ movptr(Address(rsp, (--slot) * wordSize), r17);
    __ movptr(Address(rsp, (--slot) * wordSize), r18);
    __ movptr(Address(rsp, (--slot) * wordSize), r19);
    __ movptr(Address(rsp, (--slot) * wordSize), r20);
    __ movptr(Address(rsp, (--slot) * wordSize), r21);
    __ movptr(Address(rsp, (--slot) * wordSize), r22);
    __ movptr(Address(rsp, (--slot) * wordSize), r23);
    __ movptr(Address(rsp, (--slot) * wordSize), r24);
    __ movptr(Address(rsp, (--slot) * wordSize), r25);
    __ movptr(Address(rsp, (--slot) * wordSize), r26);
    __ movptr(Address(rsp, (--slot) * wordSize), r27);
    __ movptr(Address(rsp, (--slot) * wordSize), r28);
    __ movptr(Address(rsp, (--slot) * wordSize), r29);
    __ movptr(Address(rsp, (--slot) * wordSize), r30);
    __ movptr(Address(rsp, (--slot) * wordSize), r31);
  }
  // r12-r15 are callee saved in all calling conventions
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
  Register arg0 = c_rarg0, arg1 = c_rarg1;
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

  // Restore APX extended registers r31-r16 if previously saved
  if (UseAPX) {
    __ movptr(r31, Address(rsp, (slot++) * wordSize));
    __ movptr(r30, Address(rsp, (slot++) * wordSize));
    __ movptr(r29, Address(rsp, (slot++) * wordSize));
    __ movptr(r28, Address(rsp, (slot++) * wordSize));
    __ movptr(r27, Address(rsp, (slot++) * wordSize));
    __ movptr(r26, Address(rsp, (slot++) * wordSize));
    __ movptr(r25, Address(rsp, (slot++) * wordSize));
    __ movptr(r24, Address(rsp, (slot++) * wordSize));
    __ movptr(r23, Address(rsp, (slot++) * wordSize));
    __ movptr(r22, Address(rsp, (slot++) * wordSize));
    __ movptr(r21, Address(rsp, (slot++) * wordSize));
    __ movptr(r20, Address(rsp, (slot++) * wordSize));
    __ movptr(r19, Address(rsp, (slot++) * wordSize));
    __ movptr(r18, Address(rsp, (slot++) * wordSize));
    __ movptr(r17, Address(rsp, (slot++) * wordSize));
    __ movptr(r16, Address(rsp, (slot++) * wordSize));
  }
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9,  Address(rsp, (slot++) * wordSize));
  __ movptr(r8,  Address(rsp, (slot++) * wordSize));
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");
}

//
// Arguments:
//
// Inputs:
//   src:        oop location, might be clobbered
//   tmp1:       scratch register, might not be valid.
//
// Output:
//   dst:        oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
             Register dst, Address src, Register tmp1) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    assert_different_registers(dst, tmp1, r15_thread);
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    satb_barrier(masm /* masm */,
                 noreg /* obj */,
                 dst /* pre_val */,
                 tmp1 /* tmp */,
                 true /* tosca_live */,
                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  __ shrptr(obj, CardTable::card_shift());

  // We'll use this register as the TLS base address and also later on
  // to hold the byte_map_base.
  Register thread = r15_thread;
  Register tmp = rscratch1;

  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, obj, Address::times_1);
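  // card_addr = card table base (in tmp) + card index (obj >> card_shift)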

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
              Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  // 1: non-reference types require no barriers
  if (!is_reference_type(type)) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // Flatten object address right away for simplicity: likely needed by barriers
  assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
  if (dst.index() == noreg && dst.disp() == 0) {
    if (dst.base() != tmp1) {
      __ movptr(tmp1, dst.base());
    }
  } else {
    __ lea(tmp1, dst);
  }

  bool storing_non_null = (val != noreg);

  // 2: pre-barrier: SATB needs the previous value
  if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
    satb_barrier(masm,
                 tmp1 /* obj */,
                 tmp2 /* pre_val */,
                 tmp3 /* tmp */,
                 storing_non_null /* tosca_live */,
                 false /* expand_call */);
  }

  // Store!
  BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);

  // 3: post-barrier: card barrier needs store address
  if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
    card_barrier(masm, tmp1);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
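// The sequence below: (1) fast-path CAS; (2) on failure, filter out
// legitimate failures and test whether memory holds the from-space copy of
// our to-space oldval; (3) if so, fix the location up to to-space; (4) retry
// the CAS with to-space values only; (5) materialize the boolean result if
// the caller asked for one.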
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS had failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  // Before reaching for the resolve sequence, see if we can avoid the whole
  // shebang with filters.

  // Filter: when offending in-memory value is null, the failure is definitely legitimate
  __ testptr(oldval, oldval);
  __ jcc(Assembler::zero, L_failure);

  // Filter: when heap is stable, the failure is definitely legitimate
  const Register thread = r15_thread;
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }

  // Decode offending in-memory value.
  // Test if-forwarded
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure);  // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);      // When it is 00, then also not forwarded

  // Load and mask forwarding pointer
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);
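  // (The shift right and back left by two clears the low two mark-word tag
  //  bits, leaving the clean to-space forwardee address.)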

  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }

  // Now we have the forwarded offender in tmp2.
  // Compare and if they don't match, we have legitimate failure
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.

  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and memory pointer is to-space as well. Nothing is able to store
  // from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else {
    __ movptr(oldval, tmp2);
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }

  // Step 5. If we need a boolean result out of CAS, set the flag appropriately
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
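// Scale factor for oop-array element addressing: 4-byte elements when
// compressed oops are in use, 8-byte elements otherwise.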

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count,
                                                                     Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  Label L_loop, L_done;
  const Register end = count;
  assert_different_registers(addr, end);

  // Zero count? Nothing to do.
  __ testl(count, count);
  __ jccb(Assembler::zero, L_done);

  const Register thread = r15_thread;
  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);

  __ leaq(end, Address(addr, count, TIMES_OOP, 0));  // end == addr+count*oop_size
  __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr); // end --> cards count

  __ addptr(addr, tmp);

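  // Walk the cards from the last one down to the first, writing 0 to each.
  // Zero is the dirty value for Shenandoah's card table (asserted in
  // card_barrier_c2 below).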
  __ BIND(L_loop);
  __ movb(Address(addr, count, Address::times_1), 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);

  __ BIND(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object being in the collection set.
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = r15_thread;
  const Register tmp = rdx;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), rcx);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1

#ifdef COMPILER2
#undef __
#define __ masm->

Register ShenandoahBarrierStubC2::select_temp_register(Address addr, Register reg1, Register reg2) {
  Register tmp = noreg;
  for (int i = 0; i < 8; i++) {
    Register r = as_Register(i);
    if (r != rsp && r != rbp && r != reg1 && r != reg2 && r != addr.base() && r != addr.index()) {
      if (tmp == noreg) {
        tmp = r;
        break;
      }
    }
  }

  assert(tmp != noreg, "successfully allocated");
  assert_different_registers(tmp, reg1, reg2);
  assert_different_registers(tmp, addr.base());
  assert_different_registers(tmp, addr.index());
  return tmp;
}

void ShenandoahBarrierSetAssembler::gc_state_check_c2(MacroAssembler* masm, const char test_state, BarrierStubC2* slow_stub) {
  if (ShenandoahGCStateCheckRemove) {
    // Unrealistic: remove all barrier fastpath checks.
  } else if (ShenandoahGCStateCheckHotpatch) {
    // In the ideal world, we would hot-patch the branch to slow stub with a single
    // (unconditional) jump or nop, based on our current GC state. Jump to near target
    // within the nmethod (at 32-bit offset) takes 6 bytes.
    __ nop(6);
  } else {
    Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    __ testb(gc_state, test_state);
    __ jcc(Assembler::notZero, *slow_stub->entry());
    __ bind(*slow_stub->continuation());
  }
}

void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm, Register dst, Address src) {
  // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
  if (node->bottom_type()->isa_narrowoop()) {
    __ movl(dst, src);
  } else {
    __ movq(dst, src);
  }

  // Emit barrier if needed
  if (!ShenandoahSkipBarriers && ShenandoahLoadBarrierStubC2::needs_barrier(node)) {
    Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

    ShenandoahLoadBarrierStubC2* const stub = ShenandoahLoadBarrierStubC2::create(node, dst, src);

    char check = 0;
    check |= ShenandoahLoadBarrierStubC2::needs_keep_alive_barrier(node)    ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahLoadBarrierStubC2::needs_load_ref_barrier(node)      ? ShenandoahHeap::HAS_FORWARDED : 0;
    check |= ShenandoahLoadBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
    gc_state_check_c2(masm, check, stub);
  }
}

void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm,
                                             Address dst, bool dst_narrow,
                                             Register src, bool src_narrow,
                                             Register tmp) {
  // Emit barrier if needed
  if (!ShenandoahSkipBarriers && ShenandoahStoreBarrierStubC2::needs_barrier(node)) {
    Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

    if (ShenandoahStoreBarrierStubC2::needs_keep_alive_barrier(node)) {
      ShenandoahStoreBarrierStubC2* const stub = ShenandoahStoreBarrierStubC2::create(node, dst, dst_narrow, src, src_narrow, tmp);
      stub->dont_preserve(tmp); // temp, no need to preserve it

      gc_state_check_c2(masm, ShenandoahHeap::MARKING, stub);
    }

    if (ShenandoahStoreBarrierStubC2::needs_card_barrier(node)) {
      card_barrier_c2(masm, dst, tmp);
    }
  }

  // Need to encode into tmp, because we cannot clobber src.
  // TODO: Maybe there is a matcher way to test that src is unused after this?
  if (dst_narrow && !src_narrow) {
    __ movq(tmp, src);
    if (ShenandoahStoreBarrierStubC2::src_not_null(node)) {
      __ encode_heap_oop_not_null(tmp);
    } else {
      __ encode_heap_oop(tmp);
    }
    src = tmp;
  }

  // Do the actual store
  if (dst_narrow) {
    __ movl(dst, src);
  } else {
    __ movq(dst, src);
  }
}

void ShenandoahBarrierSetAssembler::cae_c2(const MachNode* node, MacroAssembler* masm,
              Register res, Address addr, Register oldval, Register newval,
              Register tmp1, Register tmp2, bool exchange, bool maybe_null, bool narrow) {

  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert(narrow == UseCompressedOops, "should match");

  // Oldval and newval can be in the same register, but all other registers should be
  // distinct for extra safety, as we shuffle register values around.
  assert_different_registers(oldval, tmp1, tmp2, addr.base(), addr.index());
  assert_different_registers(newval, tmp1, tmp2, addr.base(), addr.index());

  // Remember oldval for retry logic in slow path. We need to do it here,
  // because it will be overwritten by the fast-path CAS.
  if (ShenandoahCASBarrierStubC2::needs_barrier(node)) {
    Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
    __ movptr(tmp2, oldval);
  }

  // Fast-path: Try to CAS optimistically.
  __ lock();
  if (narrow) {
    __ cmpxchgl(newval, addr);
  } else {
    __ cmpxchgptr(newval, addr);
  }

  // If we need a boolean result out of CAS, set the flag appropriately and promote the result.
  // This would be the final result if we do not go slow.
  if (!exchange) {
    assert(res != noreg, "need result register");
    __ setcc(Assembler::equal, res);
  } else {
    assert(res == noreg, "no result expected");
  }

  if (!ShenandoahSkipBarriers && ShenandoahCASBarrierStubC2::needs_barrier(node)) {
    Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

    if (ShenandoahCASBarrierStubC2::needs_load_ref_barrier(node) || ShenandoahCASBarrierStubC2::needs_keep_alive_barrier(node)) {
      ShenandoahCASBarrierStubC2* const stub = ShenandoahCASBarrierStubC2::create(node, addr, oldval, newval, res, tmp1, tmp2, narrow, exchange);
      if (res != noreg) {
        stub->dont_preserve(res);  // set at the end, no need to save
      }
      stub->dont_preserve(oldval); // saved explicitly
      stub->dont_preserve(tmp1);   // temp, no need to save
      stub->preserve(tmp2);        // carries oldval for final retry, must be saved

      // On success, we need to write to SATB if MARKING is set in GCState.
      // On failure, we need to run LRB and retry CAS if HAS_FORWARDED is set in GCState.
      if (exchange) {
        __ setcc(Assembler::equal, tmp1);
      }

      char state = 0;
      state |= ShenandoahCASBarrierStubC2::needs_load_ref_barrier(node)   ? ShenandoahHeap::HAS_FORWARDED : 0;
      state |= ShenandoahCASBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
      gc_state_check_c2(masm, state, stub);
    }

    if (ShenandoahStoreBarrierStubC2::needs_card_barrier(node)) {
      card_barrier_c2(masm, addr, tmp1);
    }
  }
}

void ShenandoahBarrierSetAssembler::get_and_set_c2(const MachNode* node, MacroAssembler* masm, Register newval, Address addr, Register tmp) {
  assert_different_registers(newval, tmp, addr.base(), addr.index());

  if (node->bottom_type()->isa_narrowoop()) {
    __ xchgl(newval, addr);
  } else {
    __ xchgq(newval, addr);
  }

  if (!ShenandoahSkipBarriers && (ShenandoahLoadBarrierStubC2::needs_barrier(node) || ShenandoahStoreBarrierStubC2::needs_card_barrier(node))) {
    Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

    if (ShenandoahLoadBarrierStubC2::needs_barrier(node)) {
      ShenandoahLoadBarrierStubC2* const stub = ShenandoahLoadBarrierStubC2::create(node, newval, addr);

      char check = 0;
      check |= ShenandoahLoadBarrierStubC2::needs_keep_alive_barrier(node)    ? ShenandoahHeap::MARKING : 0;
      check |= ShenandoahLoadBarrierStubC2::needs_load_ref_barrier(node)      ? ShenandoahHeap::HAS_FORWARDED : 0;
      check |= ShenandoahLoadBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
      gc_state_check_c2(masm, check, stub);
    }

    if (ShenandoahStoreBarrierStubC2::needs_card_barrier(node)) {
      card_barrier_c2(masm, addr, tmp);
    }
  }
}

void ShenandoahBarrierSetAssembler::card_barrier_c2(MacroAssembler* masm, Address dst, Register tmp) {
  // TODO: Might be a good place to implement some filters here.
  // For example, G1 only flips card marks for stores within a single region.

  __ lea(tmp, dst);
  __ shrptr(tmp, CardTable::card_shift());
  __ addptr(tmp, Address(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset())));
  Address card_address(tmp, 0);

  assert(CardTable::dirty_card_val() == 0, "Encoding assumption");
  Label L_done;
  if (UseCondCardMark) {
    __ cmpb(card_address, 0);
    __ jccb(Assembler::equal, L_done);
  }
  if (UseCompressedOops && CompressedOops::base() == nullptr) {
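    // r12 is the compressed-oops heap base register and is known to be zero
    // here, so storing its low byte writes the 0 (dirty) value without an
    // immediate operand.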
    __ movb(card_address, r12);
  } else {
    __ movb(card_address, 0);
  }
  __ bind(L_done);
}

#undef __
#define __ masm.

void ShenandoahLoadBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);

  __ bind(*entry());

  Register tmp = select_temp_register(_src, _dst);

  Label L_lrb_done, L_lrb_slow;
  Label L_keepalive_done, L_keepalive_pack_and_done, L_keepalive_slow;
  Label L_done;

  // If the object is null, there is no point in applying barriers.
  if (_narrow) {
    __ testl(_dst, _dst);
  } else {
    __ testptr(_dst, _dst);
  }
  if (!_needs_keep_alive_barrier && _needs_load_ref_barrier) {
    __ jccb(Assembler::equal, L_done);
  } else {
    __ jcc(Assembler::equal, L_done);
  }

  // Lay out barrier mid-paths here. The goal is to do quick checks/actions
  // that can be done without going to slowpath calls. This also allows doing
  // shorter branches, where possible.

  if (_needs_keep_alive_barrier) {
    // Runtime check for keep-alive, in case the other barrier is enabled.
    // Otherwise the fastpath check already checked it.
    if (_needs_load_ref_barrier) {
      Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ testb(gc_state, ShenandoahHeap::MARKING);
      __ jccb(Assembler::zero, L_keepalive_done);
    }

    // If object is narrow, we need to decode it first.
    if (_narrow) {
      __ decode_heap_oop_not_null(_dst);
    }

    // Can we store a value in the given thread's buffer?
    // (The index field is typed as size_t.)
    Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
    Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

    __ push(tmp);
    __ movptr(tmp, index);
    __ testptr(tmp, tmp);
    __ jcc(Assembler::zero, L_keepalive_slow);
    // The buffer is not full, store value into it.
    __ subptr(tmp, wordSize);
    __ movptr(index, tmp);
    __ addptr(tmp, buffer);
    __ movptr(Address(tmp, 0), _dst);
    __ pop(tmp);

    __ bind(L_keepalive_pack_and_done);
    if (_narrow) {
      __ encode_heap_oop(_dst);
    }
    __ bind(L_keepalive_done);
  }

  if (_needs_load_ref_barrier) {
    bool is_weak = (_node->barrier_data() & ShenandoahBitStrong) == 0;

    // Runtime check for LRB, in case the other barrier is enabled.
    // Otherwise the fastpath check already checked it.
    if (_needs_keep_alive_barrier) {
      Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | (is_weak ? ShenandoahHeap::WEAK_ROOTS : 0));
      __ jccb(Assembler::zero, L_lrb_done);
    }

    // Collection set check. Only really applies to strong loads, as weak/phantom loads
    // are handled in runtime.
    __ push(tmp);
    if (!is_weak) {
      if (_narrow) {
        __ decode_heap_oop_not_null(tmp, _dst);
      } else {
        __ movptr(tmp, _dst);
      }
      __ shrptr(tmp, ShenandoahHeapRegion::region_size_bytes_shift_jint());
      // Check if cset address is in good spot to just use it as offset. It almost always is.
      Address cset_addr_arg;
      intptr_t cset_addr = (intptr_t) ShenandoahHeap::in_cset_fast_test_addr();
      if ((cset_addr >> 3) < INT32_MAX) {
        assert(is_aligned(cset_addr, 8), "Sanity");
        cset_addr_arg = Address(tmp, checked_cast<int>(cset_addr >> 3), Address::times_8);
      } else {
        __ addptr(tmp, cset_addr);
        cset_addr_arg = Address(tmp, 0);
      }
      __ cmpb(cset_addr_arg, 0);
      __ jccb(Assembler::notEqual, L_lrb_slow);
      __ pop(tmp); // The slow path pops tmp itself otherwise
    } else {
      __ jmpb(L_lrb_slow);
    }

    __ bind(L_lrb_done);
  }

  // Exit here.
  __ bind(L_done);
  __ jmp(*continuation());

  // Slow paths here. LRB slow path goes first: this allows the short branches from LRB fastpath,
  // the overwhelmingly major case.
  if (_needs_load_ref_barrier) {
    __ bind(L_lrb_slow);
    __ pop(tmp); // Immediately pop tmp to make sure the stack is aligned

    // If object is narrow, we need to decode it first.
    if (_narrow) {
      __ decode_heap_oop_not_null(_dst);
    }

    dont_preserve(_dst); // For LRB we must not preserve _dst
    {
      SaveLiveRegisters save_registers(&masm, this);

      // Shuffle in the arguments. The end result should be:
      //   c_rarg0 <-- _dst
      //   c_rarg1 <-- lea(_src)
      if (c_rarg0 == _dst) {
        __ lea(c_rarg1, _src);
      } else if (c_rarg1 == _dst) {
        // Set up arguments in reverse, and then flip them
        __ lea(c_rarg0, _src);
        __ xchgptr(c_rarg0, c_rarg1);
      } else {
        assert_different_registers(c_rarg1, _dst);
        __ lea(c_rarg1, _src);
        __ movptr(c_rarg0, _dst);
      }

      address entry = nullptr;
1359       if (_narrow) {
1360         if ((_node->barrier_data() & ShenandoahBitStrong) != 0) {
1361           entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
1362         } else if ((_node->barrier_data() & ShenandoahBitWeak) != 0) {
1363           entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
1364         } else if ((_node->barrier_data() & ShenandoahBitPhantom) != 0) {
1365           entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1366         }
1367       } else {
1368         if ((_node->barrier_data() & ShenandoahBitStrong) != 0) {
1369           entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
1370         } else if ((_node->barrier_data() & ShenandoahBitWeak) != 0) {
1371           entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
1372         } else if ((_node->barrier_data() & ShenandoahBitPhantom) != 0) {
1373           entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
1374         }
1375       }
1376       __ call(RuntimeAddress(entry), rax);
1377       assert(!save_registers.contains(_dst), "must not save result register");
1378       __ movptr(_dst, rax);
1379     }
1380     if (_narrow) {
1381       __ encode_heap_oop(_dst);
1382     }
1383     __ jmp(L_lrb_done);
  }

  if (_needs_keep_alive_barrier) {
    __ bind(L_keepalive_slow);
    __ pop(tmp); // Immediately pop to make sure the stack is aligned

    preserve(_dst); // For SATB we must preserve _dst
    {
      SaveLiveRegisters save_registers(&masm, this);
      if (c_rarg0 != _dst) {
        __ mov(c_rarg0, _dst);
      }
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre)), rax);
    }
    __ jmp(L_keepalive_pack_and_done);
  }
}

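// The store barrier stub implements the SATB (snapshot-at-the-beginning)
// pre-write barrier: before a reference field is overwritten during
// concurrent marking, the previous value must be recorded so the marker
// still visits it. The fast path enqueues the previous value into the
// thread-local SATB buffer; the slow path calls into the runtime.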
void ShenandoahStoreBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);

  __ bind(*entry());

  Label L_runtime, L_preval_null;

  // We need 2 temp registers for this code to work.
  // _tmp is already allocated and will carry preval for the call.
  // Allocate the other one now.
  Register tmp2 = select_temp_register(_dst, _src, _tmp);

  Register preval = _tmp;

  // Load the previous value from memory (32-bit load for narrow oops).
  if (_dst_narrow) {
    __ movl(preval, _dst);
  } else {
    __ movq(preval, _dst);
  }

  // Is the previous value null? If so, there is nothing to record.
  __ testptr(preval, preval);
  __ jccb(Assembler::equal, L_preval_null);

  if (_dst_narrow) {
    __ decode_heap_oop_not_null(preval);
  }

  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Register slot = tmp2;
  __ push(tmp2);
  __ movptr(slot, index);
  __ testptr(slot, slot);
  __ jccb(Assembler::zero, L_runtime);
  __ subptr(slot, wordSize);
  __ movptr(index, slot);
  __ addptr(slot, buffer);
  __ movptr(Address(slot, 0), preval);
  __ pop(tmp2);
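
  // Illustrative C-like sketch of the enqueue above (comments only):
  //   if (queue.index == 0) goto runtime;   // buffer exhausted
  //   queue.index -= wordSize;              // bump-down allocation
  //   *(oop*)(queue.buffer + queue.index) = preval;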

  // Exit here.
  __ bind(L_preval_null);
  __ jmp(*continuation());

  __ bind(L_runtime);
  __ pop(tmp2); // Immediately pop tmp2 to make sure the stack is aligned
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != preval) {
      __ mov(c_rarg0, preval);
    }
    // rax is a caller-saved, non-argument-passing register, so it does not
    // interfere with c_rarg0 or c_rarg1. If it contained any live value before
    // entering this stub, it is saved at this point, and restored after the
    // call. If it did not contain any live value, it is free to be used. In
    // either case, it is safe to use it here as a call scratch register.
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre)), rax);
  }
  __ jmp(*continuation());
}

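// The CAS barrier stub handles a failed CAS that may be a false negative:
// memory may hold the from-space copy of the object whose to-space copy we
// used as the expected value. The stub heals the location through the LRB
// and retries the CAS once; after a successful CAS it still has to record
// the previous value in the SATB queue, like any other reference store.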
void ShenandoahCASBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);

  __ bind(*entry());

  Label L_final;
  Label L_succeeded;

  // Check if the first CAS succeeded; if it did, we only need the SATB work.
  Register tst = _cae ? _tmp1 : _result;
  __ testq(tst, tst);
  __ jnz(L_succeeded);

  // LRB + CAS retry.
  // The CAS has failed because the value held at addr does not match expected.
  // This may be a false negative: the version in memory might be the from-space
  // copy of the same object we currently hold a to-space reference for.
  //
  // To resolve this, we pass the location through the LRB fixup, which makes
  // sure the location holds only to-space pointers. To avoid calling into the
  // runtime too often, we check the object against the collection set first.
  // We can inline most of the work here, but there is little point, as CAS
  // failures over cset locations must be rare. This fast-slow split matches
  // what we do for the normal LRB.

  assert(_expected == rax, "expected must be rax");

  // Non-strong references should always go to the runtime. We do not expect
  // CASes over non-strong locations.
  assert((_node->barrier_data() & ShenandoahBitStrong) != 0, "Only strong references for CASes");

  // The (compressed) failure witness is in _expected.
  // Unpack it and check whether it is in the collection set.
  if (_narrow) {
    __ decode_heap_oop(_expected);
  }
  __ movptr(_tmp1, _expected);
  __ shrptr(_tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ addptr(_tmp1, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
  __ cmpb(Address(_tmp1, 0), 0);
  __ jcc(Assembler::zero, L_final);

  {
    SaveLiveRegisters save_registers(&masm, this);
    // Load up the failure witness again.
    if (c_rarg0 != _expected) {
      __ movptr(c_rarg0, _expected);
    }
    __ lea(c_rarg1, _addr);

    if (_narrow) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), 2);
    }
    // We have called the LRB to fix up the heap location. We do not care about
    // its result, as we will just try to CAS the location again.
  }

  __ bind(L_final);

  // Try the CAS again with the original expected value (stashed in _tmp2).
  // At this point, there can no longer be false negatives.
  __ movptr(_expected, _tmp2);
  __ lock();
  if (_narrow) {
    __ cmpxchgl(_new_val, _addr);
  } else {
    __ cmpxchgptr(_new_val, _addr);
  }
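
  // lock cmpxchg compares rax (_expected) with the memory operand: on a match
  // it stores _new_val and sets ZF; on a mismatch it clears ZF and loads the
  // memory value into rax. The setcc/jcc below key off that ZF.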
  if (!_cae) {
    assert(_result != noreg, "need result register");
    __ setcc(Assembler::equal, _result);
  } else {
    assert(_result == noreg, "no result expected");
  }
  // If the retry did not succeed, skip the SATB work.
  __ jcc(Assembler::notEqual, *continuation());

  // SATB: the CAS succeeded, so record the previous value.
  __ bind(L_succeeded);
  Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label L_satb_pack_and_done, L_runtime;

  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, *continuation());

  // Paranoia: the CAS has succeeded, so what was in memory is definitely oldval.
  // Instead of pulling it from other code paths, pull it from the stashed value.
  // TODO: Figure out a better way to do this.
  __ movptr(_expected, _tmp2);

  // Is the previous value null? If so, there is nothing to record.
  __ cmpptr(_expected, NULL_WORD);
  __ jcc(Assembler::equal, *continuation());

  if (_narrow) {
    __ decode_heap_oop_not_null(_expected);
  }

  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  __ movptr(_tmp1, index);
  __ testptr(_tmp1, _tmp1);
  __ jccb(Assembler::zero, L_runtime);
  // The buffer is not full, store the value into it.
  __ subptr(_tmp1, wordSize);
  __ movptr(index, _tmp1);
  __ addptr(_tmp1, buffer);
  __ movptr(Address(_tmp1, 0), _expected);

  __ bind(L_satb_pack_and_done);
  if (_narrow) {
    __ encode_heap_oop_not_null(_expected);
  }
  __ jmp(*continuation());

  __ bind(L_runtime);

  // The expected register should not be clobbered.
  preserve(_expected);

  // Carry the CAS/CAE result over the slowpath call.
  if (_cae) {
    assert(_result == noreg, "no result expected");
  } else {
    assert(_result != noreg, "need result register");
    preserve(_result);
  }
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != _expected) {
      __ mov(c_rarg0, _expected);
    }
    // rax is a caller-saved, non-argument-passing register, so it does not
    // interfere with c_rarg0 or c_rarg1. If it contained any live value before
    // entering this stub, it is saved at this point, and restored after the
    // call. If it did not contain any live value, it is free to be used. In
    // either case, it is safe to use it here as a call scratch register.
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre)), rax);
  }
  __ jmp(L_satb_pack_and_done);

}
#undef __
#endif