/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

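// Save and restore caller-side machine state around slow-path runtime calls.
// GPRs are saved via push_IU_state(). FP state is saved as xmm0-xmm7 when SSE
// is available (these can hold live Java float arguments on some paths, e.g.
// from the c2i adapter), otherwise via the full x87 FPU state.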
static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
    LP64_ONLY(assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call"));

    if (UseSSE >= 2) {
      const int xmm_size = wordSize * LP64_ONLY(2) NOT_LP64(4);
      __ subptr(rsp, xmm_size * 8);
      __ movdbl(Address(rsp, xmm_size * 0), xmm0);
      __ movdbl(Address(rsp, xmm_size * 1), xmm1);
      __ movdbl(Address(rsp, xmm_size * 2), xmm2);
      __ movdbl(Address(rsp, xmm_size * 3), xmm3);
      __ movdbl(Address(rsp, xmm_size * 4), xmm4);
      __ movdbl(Address(rsp, xmm_size * 5), xmm5);
      __ movdbl(Address(rsp, xmm_size * 6), xmm6);
      __ movdbl(Address(rsp, xmm_size * 7), xmm7);
    } else if (UseSSE >= 1) {
      const int xmm_size = wordSize * LP64_ONLY(1) NOT_LP64(2);
      __ subptr(rsp, xmm_size * 8);
      __ movflt(Address(rsp, xmm_size * 0), xmm0);
      __ movflt(Address(rsp, xmm_size * 1), xmm1);
      __ movflt(Address(rsp, xmm_size * 2), xmm2);
      __ movflt(Address(rsp, xmm_size * 3), xmm3);
      __ movflt(Address(rsp, xmm_size * 4), xmm4);
      __ movflt(Address(rsp, xmm_size * 5), xmm5);
      __ movflt(Address(rsp, xmm_size * 6), xmm6);
      __ movflt(Address(rsp, xmm_size * 7), xmm7);
    } else {
      __ push_FPU_state();
    }
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    if (UseSSE >= 2) {
      const int xmm_size = wordSize * LP64_ONLY(2) NOT_LP64(4);
      __ movdbl(xmm0, Address(rsp, xmm_size * 0));
      __ movdbl(xmm1, Address(rsp, xmm_size * 1));
      __ movdbl(xmm2, Address(rsp, xmm_size * 2));
      __ movdbl(xmm3, Address(rsp, xmm_size * 3));
      __ movdbl(xmm4, Address(rsp, xmm_size * 4));
      __ movdbl(xmm5, Address(rsp, xmm_size * 5));
      __ movdbl(xmm6, Address(rsp, xmm_size * 6));
      __ movdbl(xmm7, Address(rsp, xmm_size * 7));
      __ addptr(rsp, xmm_size * 8);
    } else if (UseSSE >= 1) {
      const int xmm_size = wordSize * LP64_ONLY(1) NOT_LP64(2);
      __ movflt(xmm0, Address(rsp, xmm_size * 0));
      __ movflt(xmm1, Address(rsp, xmm_size * 1));
      __ movflt(xmm2, Address(rsp, xmm_size * 2));
      __ movflt(xmm3, Address(rsp, xmm_size * 3));
      __ movflt(xmm4, Address(rsp, xmm_size * 4));
      __ movflt(xmm5, Address(rsp, xmm_size * 5));
      __ movflt(xmm6, Address(rsp, xmm_size * 6));
      __ movflt(xmm7, Address(rsp, xmm_size * 7));
      __ addptr(rsp, xmm_size * 8);
    } else {
      __ pop_FPU_state();
    }
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {
    if (ShenandoahCardBarrier) {
      bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
      bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
      bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);

      // We need to save the original element count because the array copy stub
      // will destroy the value and we need it for the card marking barrier.
#ifdef _LP64
      if (!checkcast) {
        if (!obj_int) {
          // Save count for barrier
          __ movptr(r11, count);
        } else if (disjoint) {
          // Save dst in r11 in the disjoint case
          __ movq(r11, dst);
        }
      }
#else
      if (disjoint) {
        __ mov(rdx, dst); // save 'to'
      }
#endif
    }

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
#ifdef _LP64
      Register thread = r15_thread;
#else
      Register thread = rax;
      if (thread == src || thread == dst || thread == count) {
        thread = rbx;
      }
      if (thread == src || thread == dst || thread == count) {
        thread = rcx;
      }
      if (thread == src || thread == dst || thread == count) {
        thread = rdx;
      }
      __ push(thread);
      __ get_thread(thread);
#endif
      assert_different_registers(src, dst, count, thread);

      Label L_done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, L_done);

      // Avoid runtime call when not active.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, L_done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

#ifdef _LP64
      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
                        src, dst, count);
      } else
#endif
      {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(L_done);
      NOT_LP64(__ pop(thread);)
    }
  }
}

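// Post-copy card marking for the card-table remembered set: recover the
// destination address or element count that the prologue stashed away
// (r11 on x86_64, rdx on x86_32) and dirty the cards covering the copied range.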
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  if (ShenandoahCardBarrier && is_reference_type(type)) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
    bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
    Register tmp = rax;

#ifdef _LP64
    if (!checkcast) {
      if (!obj_int) {
        // Use the count saved for the barrier in the prologue
        count = r11;
      } else if (disjoint) {
        // Use the saved dst in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
#else
    if (disjoint) {
      __ mov(dst, rdx); // restore 'to'
    }
#endif
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

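// SATB (snapshot-at-the-beginning) pre-write barrier: while marking is active,
// the value about to be overwritten is recorded into the thread-local SATB
// queue, so the concurrent marker still traverses the heap snapshot as it
// existed when marking started.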
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);             // tmp := *index_adr
  __ cmpptr(tmp, 0);                 // tmp == 0?
  __ jcc(Assembler::equal, runtime); // If yes, goto runtime

  __ subptr(tmp, wordSize);          // tmp := tmp - wordSize
  __ movptr(index, tmp);             // *index_adr := tmp
  __ addptr(tmp, buffer);            // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  NOT_LP64( __ push(thread); )

#ifdef _LP64
  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }
#endif

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), LP64_ONLY(c_rarg0) NOT_LP64(pre_val), thread);
  }

  NOT_LP64( __ pop(thread); )

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

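// Load-reference barrier: given a just-loaded reference in dst, produce the
// canonical (to-space) copy of the object. The fast path only checks the
// thread-local gc-state byte and, for strong accesses, the collection-set
// membership of the object's region; the slow path calls into the runtime.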
void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
#ifdef _LP64
  Register thread = r15_thread;
#else
  Register thread = rcx;
  if (thread == dst) {
    thread = rbx;
  }
  __ push(thread);
  __ get_thread(thread);
#endif

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // The rest is saved with the optimized path

  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) LP64_ONLY(+4);
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
#ifdef _LP64
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // r12-r15 are callee saved in all calling conventions
#endif
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
#ifdef _LP64
  Register arg0 = c_rarg0, arg1 = c_rarg1;
#else
  Register arg0 = rdi, arg1 = rsi;
#endif
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

#ifdef _LP64
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9,  Address(rsp, (slot++) * wordSize));
  __ movptr(r8,  Address(rsp, (slot++) * wordSize));
#endif
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");

#ifndef _LP64
  __ pop(thread);
#endif
}

//
// Arguments:
//
// Inputs:
//   src: oop location, might be clobbered
//   tmp1: scratch register, might not be valid.
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
    assert_different_registers(dst, tmp1, tmp_thread);
    if (!thread->is_valid()) {
      thread = rdx;
    }
    NOT_LP64(__ get_thread(thread));
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    shenandoah_write_barrier_pre(masm /* masm */,
                                 noreg /* obj */,
                                 dst /* pre_val */,
                                 thread /* thread */,
                                 tmp1 /* tmp */,
                                 true /* tosca_live */,
                                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

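// The store check dirties the card covering the updated oop location. With the
// default 512-byte card size, the card index is (address >> card_shift); the
// per-thread card-table base is added to yield the card byte to dirty.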
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  __ shrptr(obj, CardTable::card_shift());

  // We'll use this register as the TLS base address and also later on
  // to hold the byte_map_base.
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Register tmp = LP64_ONLY(rscratch1) NOT_LP64(rdx);

#ifndef _LP64
  // The next two ifs are just to get temporary registers to use for TLS and card table base.
  if (thread == obj) {
    thread = rdx;
    tmp = rsi;
  }
  if (tmp == obj) {
    tmp = rsi;
  }

  __ push(thread);
  __ push(tmp);
  __ get_thread(thread);
#endif

  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, obj, Address::times_1);

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }

#ifndef _LP64
  __ pop(tmp);
  __ pop(thread);
#endif
}

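// Oop store: SATB pre-barrier (log the old value) before the store, then the
// store itself, then the card mark when the card barrier is enabled.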
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool on_oop = is_reference_type(type);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

    assert_different_registers(val, tmp1, tmp2, tmp3, rthread);

#ifndef _LP64
    __ get_thread(rthread);
    InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
    imasm->save_bcp();
#endif

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /*masm*/,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   rthread /* thread */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }

    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    if (val != noreg) {
      if (ShenandoahCardBarrier) {
        store_check(masm, tmp1);
      }
    }
    NOT_LP64(imasm->restore_bcp());
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}

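// JNI fast-path resolution: if evacuation is in progress, the resolved oop may
// be a from-space copy, so bail to the slow path, which applies the full
// load-reference barrier.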
void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
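// A false negative arises when the expected value in the register is a
// to-space pointer but memory still holds the from-space pointer to the same
// object (or vice versa): the raw compare fails even though the location
// logically holds the expected object.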
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else
#endif
  {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS had failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  // Before resorting to the resolve sequence, see if we can avoid the whole shebang
  // with filters.

  // Filter: when offending in-memory value is null, the failure is definitely legitimate
  __ testptr(oldval, oldval);
  __ jcc(Assembler::zero, L_failure);

  // Filter: when heap is stable, the failure is definitely legitimate
#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = tmp2;
  __ get_thread(thread);
#endif
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else
#endif
  {
    __ movptr(tmp2, oldval);
  }

  // Decode offending in-memory value.
  // Test if-forwarded
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);     // When it is 00, then also not forwarded

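  // A forwarded object has both low mark-word bits set (the "marked" pattern,
  // 0b11); the rest of the mark word is the forwardee address, so clearing
  // the two low bits below recovers the to-space pointer.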
  // Load and mask forwarding pointer
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);

#ifdef _LP64
  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }
#endif

  // Now we have the forwarded offender in tmp2.
  // Compare and if they don't match, we have legitimate failure
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.

#ifdef _LP64
  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }
#endif

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and memory pointer is to-space as well. Nothing is able to store
  // from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else
#endif
  {
    __ movptr(oldval, tmp2);
  }

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }

  // Step 5. If we need a boolean result out of CAS, set the flag appropriately,
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)

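// Dirty every card covering the oop array range [addr, addr + count * oop_size).
// The card index range is computed inclusively (end is backed up by one heap
// oop) and walked backwards; the stored 0 is CardTable's dirty value.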
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count,
                                                                     Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  Label L_loop, L_done;
  const Register end = count;
  assert_different_registers(addr, end);

  // Zero count? Nothing to do.
  __ testl(count, count);
  __ jccb(Assembler::zero, L_done);

#ifdef _LP64
  const Register thread = r15_thread;
  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);

  __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
  __ subptr(end, BytesPerHeapOop);                  // end - 1 to make inclusive
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> cards count

  __ addptr(addr, tmp);

  __ BIND(L_loop);
  __ movb(Address(addr, count, Address::times_1), 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);
#else
  const Register thread = tmp;
  __ get_thread(thread);

  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);

  __ lea(end, Address(addr, count, Address::times_ptr, -wordSize));
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr); // end --> count

  __ addptr(addr, tmp);

  __ BIND(L_loop);
  Address cardtable(addr, count, Address::times_1, 0);
  __ movb(cardtable, 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);
#endif

  __ BIND(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object being in the collection set.
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
#ifdef _LP64
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
#else
    // On x86_32, the C1 register allocator can give us a register without 8-bit support.
    // Do the full-register access and test to avoid compilation failures.
    __ movptr(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testptr(tmp2, 0xFF);
#endif
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), rcx, thread);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

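// C1 slow-path runtime stub for the load-reference barrier: saves live
// registers, calls the runtime entry matching the access strength and
// narrowness encoded in the decorators, and returns the canonical oop in rax.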
void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

#ifdef _LP64
  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }
#else
  __ load_parameter(0, rax);
  __ load_parameter(1, rbx);
  if (is_strong) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), rax, rbx);
  } else if (is_weak) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), rax, rbx);
  } else {
    assert(is_phantom, "only remaining strength");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), rax, rbx);
  }
#endif

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1