/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

#define __ masm->

static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
    assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call");
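
    // Layout sketch: eight 16-byte stack slots, one per argument xmm register;
    // movdbl writes only the low 8 bytes (the double) of each slot.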
    const int xmm_size = wordSize * 2;
    __ subptr(rsp, xmm_size * 8);
    __ movdbl(Address(rsp, xmm_size * 0), xmm0);
    __ movdbl(Address(rsp, xmm_size * 1), xmm1);
    __ movdbl(Address(rsp, xmm_size * 2), xmm2);
    __ movdbl(Address(rsp, xmm_size * 3), xmm3);
    __ movdbl(Address(rsp, xmm_size * 4), xmm4);
    __ movdbl(Address(rsp, xmm_size * 5), xmm5);
    __ movdbl(Address(rsp, xmm_size * 6), xmm6);
    __ movdbl(Address(rsp, xmm_size * 7), xmm7);
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    const int xmm_size = wordSize * 2;
    __ movdbl(xmm0, Address(rsp, xmm_size * 0));
    __ movdbl(xmm1, Address(rsp, xmm_size * 1));
    __ movdbl(xmm2, Address(rsp, xmm_size * 2));
    __ movdbl(xmm3, Address(rsp, xmm_size * 3));
    __ movdbl(xmm4, Address(rsp, xmm_size * 4));
    __ movdbl(xmm5, Address(rsp, xmm_size * 5));
    __ movdbl(xmm6, Address(rsp, xmm_size * 6));
    __ movdbl(xmm7, Address(rsp, xmm_size * 7));
    __ addptr(rsp, xmm_size * 8);
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {
    if (ShenandoahCardBarrier) {
      bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
      bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
      bool obj_int = (type == T_OBJECT) && UseCompressedOops;

      // We need to save the original element count because the array copy stub
      // will destroy the value and we need it for the card marking barrier.
      if (!checkcast) {
        if (!obj_int) {
          // Save count for barrier
          __ movptr(r11, count);
        } else if (disjoint) {
          // Save dst in r11 in the disjoint case
          __ movq(r11, dst);
        }
      }
    }

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
      Register thread = r15_thread;
      assert_different_registers(src, dst, count, thread);

      Label L_done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, L_done);

      // Avoid runtime call when not active.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, L_done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(L_done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  if (ShenandoahCardBarrier && is_reference_type(type)) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
    bool obj_int = (type == T_OBJECT) && UseCompressedOops;
    Register tmp = rax;

    if (!checkcast) {
      if (!obj_int) {
        // Restore the count saved for the barrier in the prologue
        count = r11;
      } else if (disjoint) {
        // Use the saved dst in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register tmp,
                                                 bool tosca_live,
                                                 bool expand_call) {
  assert(ShenandoahSATBBarrier, "Should be checked by caller");

  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  const Register thread = r15_thread;

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
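  //
  // The fast path below, as a C-like sketch (the index counts down in bytes):
  //   if (index == 0) goto runtime;   // buffer is full
  //   index -= wordSize;
  //   *(buffer + index) = pre_val;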

  __ movptr(tmp, index);              // tmp := *index_adr
  __ cmpptr(tmp, 0);                  // tmp == 0?
  __ jcc(Assembler::equal, runtime);  // If yes, goto runtime

  __ subptr(tmp, wordSize);           // tmp := tmp - wordSize
  __ movptr(index, tmp);              // *index_adr := tmp
  __ addptr(tmp, buffer);             // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(ebp + frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy the thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), 1);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
  }

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
  Register thread = r15_thread;

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // FP state is saved above; the GPRs are saved manually below via an
  // optimized path that covers only the caller-saved registers.

  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
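  // That is: rcx/rdx/rdi/rsi, optionally rax, r8..r11, plus r16..r31 with APX.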
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // Save APX extended registers r16-r31 if enabled
  if (UseAPX) {
    __ movptr(Address(rsp, (--slot) * wordSize), r16);
    __ movptr(Address(rsp, (--slot) * wordSize), r17);
    __ movptr(Address(rsp, (--slot) * wordSize), r18);
    __ movptr(Address(rsp, (--slot) * wordSize), r19);
    __ movptr(Address(rsp, (--slot) * wordSize), r20);
    __ movptr(Address(rsp, (--slot) * wordSize), r21);
    __ movptr(Address(rsp, (--slot) * wordSize), r22);
    __ movptr(Address(rsp, (--slot) * wordSize), r23);
    __ movptr(Address(rsp, (--slot) * wordSize), r24);
    __ movptr(Address(rsp, (--slot) * wordSize), r25);
    __ movptr(Address(rsp, (--slot) * wordSize), r26);
    __ movptr(Address(rsp, (--slot) * wordSize), r27);
    __ movptr(Address(rsp, (--slot) * wordSize), r28);
    __ movptr(Address(rsp, (--slot) * wordSize), r29);
    __ movptr(Address(rsp, (--slot) * wordSize), r30);
    __ movptr(Address(rsp, (--slot) * wordSize), r31);
  }
  // r12-r15 are callee saved in all calling conventions
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
  Register arg0 = c_rarg0, arg1 = c_rarg1;
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

  // Restore APX extended registers r31-r16 if previously saved
  if (UseAPX) {
    __ movptr(r31, Address(rsp, (slot++) * wordSize));
    __ movptr(r30, Address(rsp, (slot++) * wordSize));
    __ movptr(r29, Address(rsp, (slot++) * wordSize));
    __ movptr(r28, Address(rsp, (slot++) * wordSize));
    __ movptr(r27, Address(rsp, (slot++) * wordSize));
    __ movptr(r26, Address(rsp, (slot++) * wordSize));
    __ movptr(r25, Address(rsp, (slot++) * wordSize));
    __ movptr(r24, Address(rsp, (slot++) * wordSize));
    __ movptr(r23, Address(rsp, (slot++) * wordSize));
    __ movptr(r22, Address(rsp, (slot++) * wordSize));
    __ movptr(r21, Address(rsp, (slot++) * wordSize));
    __ movptr(r20, Address(rsp, (slot++) * wordSize));
    __ movptr(r19, Address(rsp, (slot++) * wordSize));
    __ movptr(r18, Address(rsp, (slot++) * wordSize));
    __ movptr(r17, Address(rsp, (slot++) * wordSize));
    __ movptr(r16, Address(rsp, (slot++) * wordSize));
  }
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9,  Address(rsp, (slot++) * wordSize));
  __ movptr(r8,  Address(rsp, (slot++) * wordSize));
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");
}

//
// Arguments:
//
// Inputs:
//   src:  oop location, might be clobbered
//   tmp1: scratch register, might not be valid.
//
// Output:
//   dst:  oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    assert_different_registers(dst, tmp1, r15_thread);
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    satb_barrier(masm /* masm */,
                 noreg /* obj */,
                 dst /* pre_val */,
                 tmp1 /* tmp */,
                 true /* tosca_live */,
                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  __ shrptr(obj, CardTable::card_shift());

  // We'll use this register as the TLS base address and also later on
  // to hold the byte_map_base.
  Register thread = r15_thread;
  Register tmp = rscratch1;

  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, obj, Address::times_1);
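  // The card byte lives at card_table_base + (obj >> card_shift()): one byte
  // per card (typically 512 heap bytes at the default card_shift of 9).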

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  // 1: non-reference types require no barriers
  if (!is_reference_type(type)) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // Flatten the object address right away for simplicity: it is likely needed by the barriers
  assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
  if (dst.index() == noreg && dst.disp() == 0) {
    if (dst.base() != tmp1) {
      __ movptr(tmp1, dst.base());
    }
  } else {
    __ lea(tmp1, dst);
  }

  bool storing_non_null = (val != noreg);

  // 2: pre-barrier: SATB needs the previous value
  if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
    satb_barrier(masm,
                 tmp1 /* obj */,
                 tmp2 /* pre_val */,
                 tmp3 /* tmp */,
                 storing_non_null /* tosca_live */,
                 false /* expand_call */);
  }

  // Store!
  BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);

  // 3: post-barrier: the card barrier needs the store address
  if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
    card_barrier(masm, tmp1);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

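  // jni_env is thread + jni_environment_offset(), so subtracting that offset
  // rebases the displacement onto the thread, reaching the thread-local
  // gc_state byte.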
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS has failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it suffices to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they still mismatch, then the failure was legitimate.
  //
  // Before reaching for the resolve sequence, see if we can avoid the whole
  // shebang with filters.
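  //
  // A worked example (sketch): the caller CASes location L from expected value
  // E to N. E is the to-space copy of object O, but L still holds O's stale
  // from-space pointer F. The hardware CAS fails (F != E) even though F and E
  // denote the same object; steps 2-4 here repair exactly this false negative.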

  // Filter: when offending in-memory value is null, the failure is definitely legitimate
  if (UseCompressedOops) {
    __ testl(oldval, oldval);
  } else {
    __ testptr(oldval, oldval);
  }
  __ jcc(Assembler::zero, L_failure);

  // Filter: when heap is stable, the failure is definitely legitimate
  const Register thread = r15_thread;
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

  // Decode offending in-memory value.
  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }

  // Test if-forwarded
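  // The two lowest mark word bits are 0b11 (markWord::marked_value) only when
  // the object is forwarded; 0b00, 0b01 and 0b10 all mean "not forwarded".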
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure);  // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);      // When it is 00, then also not forwarded

  // Load and mask forwarding pointer
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);

  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }

  // Now we have the forwarded offender in tmp2.
  // Compare and if they don't match, we have legitimate failure
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.

  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and memory pointer is to-space as well. Nothing is able to store
  // from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else {
    __ movptr(oldval, tmp2);
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }

  // Step 5. If we need a boolean result out of CAS, set the flag appropriately
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::gc_state_check_c2(MacroAssembler* masm, const char test_state, BarrierStubC2* slow_stub) {
  const int size = 11;
  if (ShenandoahNopGCState) {
    __ nop(size);
    return;
  }
#ifdef ASSERT
  address start = __ pc();
#endif

  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
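  // Emits, roughly: testb byte ptr [r15 + gc_state_offset], test_state; jnz stub.
  // This sequence must occupy exactly `size` bytes so that the nop-padded
  // ShenandoahNopGCState variant above stays layout-compatible (asserted below).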
  __ testb(gc_state, test_state);
  __ jcc(Assembler::notZero, *slow_stub->entry());

#ifdef ASSERT
  int actual_size = __ pc() - start;
  assert(actual_size == size, "Should be: %d == %d", actual_size, size);
#endif
}

void ShenandoahBarrierSetAssembler::load_ref_barrier_c2(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp1, Register tmp2, Register tmp3, bool narrow) {
  if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

  ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp1, tmp2, tmp3, narrow);
  stub->dont_preserve(obj); // set at the end, no need to save
  if (tmp1 != noreg) {
    stub->dont_preserve(tmp1); // temp, no need to save
  }
  if (tmp2 != noreg) {
    stub->dont_preserve(tmp2); // temp, no need to save
  }
  if (tmp3 != noreg) {
    stub->dont_preserve(tmp3); // temp, no need to save
  }

  int flags = ShenandoahHeap::HAS_FORWARDED;
  if ((node->barrier_data() & ShenandoahBarrierStrong) == 0) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  gc_state_check_c2(masm, flags, stub);
  __ bind(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm,
                                            Register dst,
                                            Address src,
                                            bool narrow,
                                            Register tmp) {
  // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
  if (narrow) {
    __ movl(dst, src);
  } else {
    __ movq(dst, src);
  }

  // Emit barrier if needed
  if (ShenandoahLoadBarrierStubC2::needs_barrier(node)) {
    Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

    ShenandoahLoadBarrierStubC2* const stub = ShenandoahLoadBarrierStubC2::create(node, dst, src, narrow, tmp);
    stub->dont_preserve(tmp); // temp, no need to save

    char check = 0;
    check |= ShenandoahLoadBarrierStubC2::needs_satb_barrier(node) ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahLoadBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
    check |= ShenandoahLoadBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
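    // A single combined mask: the stub is entered when any relevant gc-state
    // bit is set, and re-checks the individual conditions on its slow path.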
    gc_state_check_c2(masm, check, stub);
    __ bind(*stub->continuation());
  }
}

void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm,
                                             Address dst, bool dst_narrow,
                                             Register src, bool src_narrow,
                                             Register tmp) {
  // Emit barrier if needed
  if (ShenandoahStoreBarrierStubC2::needs_barrier(node)) {
    Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

    if (ShenandoahStoreBarrierStubC2::needs_satb_barrier(node)) {
      ShenandoahStoreBarrierStubC2* const stub = ShenandoahStoreBarrierStubC2::create(node, dst, dst_narrow, src, src_narrow, tmp);
      stub->dont_preserve(tmp); // temp, no need to preserve it

      gc_state_check_c2(masm, ShenandoahHeap::MARKING, stub);
      __ bind(*stub->continuation());
    }

    if (ShenandoahStoreBarrierStubC2::needs_card_barrier(node)) {
      // The card table barrier is not conditional on GC state.
      // You might think this needs to be a post-barrier. But I don't think it does: the card table
      // updates and the stores are not expected to be ordered. As long as there is no safepoint
      // between these stores, we are free to do them in any order.

      // So it is convenient to pull the card table update in here. It also follows the stencil we want:
      // there should be a single gc-state check for every possible fast path. If the card table barrier
      // needed a gc-state check, we would have commoned it with the gc-state check for the SATB barrier
      // above, and _then_ called into the slow path.

      // Using this address compute sequence allows us to use only one temp register.
      // TODO: Upstream this separately, mainline Shenandoah might benefit from this already?
      __ lea(tmp, dst);
      __ shrptr(tmp, CardTable::card_shift());
      __ addptr(tmp, Address(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset())));
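      // tmp now holds card_table_base + (store_address >> card_shift), i.e. the
      // address of the card byte covering the store.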

      int dirty = CardTable::dirty_card_val();
      if (UseCondCardMark) {
        Label L_already_dirty;
        __ cmpb(Address(tmp, 0), dirty);
        __ jccb(Assembler::equal, L_already_dirty);
        __ movb(Address(tmp, 0), dirty);
        __ bind(L_already_dirty);
      } else {
        __ movb(Address(tmp, 0), dirty);
      }
    }
  }

  // Need to encode into tmp, because we cannot clobber src.
  // TODO: Maybe there is a matcher way to test that src is unused after this?
  if (dst_narrow && !src_narrow) {
    __ movq(tmp, src);
    if (ShenandoahStoreBarrierStubC2::src_not_null(node)) {
      __ encode_heap_oop_not_null(tmp);
    } else {
      __ encode_heap_oop(tmp);
    }
    src = tmp;
  }

  // Do the actual store
  if (dst_narrow) {
    __ movl(dst, src);
  } else {
    __ movq(dst, src);
  }
}

void ShenandoahBarrierSetAssembler::satb_barrier_c2(const MachNode* node, MacroAssembler* masm,
                                                    Register addr, Register preval, Register tmp) {
  if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
    return;
  }
  ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, preval, tmp, /* TODO: */ false);
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

  gc_state_check_c2(masm, ShenandoahHeap::MARKING, stub);
  __ bind(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm,
                                                    Register addr, Register addr_tmp, Register tmp) {
  if ((node->barrier_data() & ShenandoahBarrierCardMark) == 0) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  if (addr != noreg) {
    __ mov(addr_tmp, addr);
  }
  __ shrptr(addr_tmp, CardTable::card_shift());

  Address curr_ct_holder_addr(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, addr_tmp, Address::times_1);

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

void ShenandoahBarrierSetAssembler::cmpxchg_oop_c2(const MachNode* node, MacroAssembler* masm,
                                                   Register res, Address addr, Register oldval, Register newval, Register tmp1, Register tmp2,
                                                   bool exchange) {
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  // Remember oldval for retry logic in slow path. We need to do it here,
  // because it will be overwritten by the fast-path CAS.
  if (ShenandoahCASBarrier) {
    __ movptr(tmp2, oldval);
  }

  // Fast-path: Try to CAS optimistically. If successful, then we are done.
  __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(newval, addr);
  } else {
    __ cmpxchgptr(newval, addr);
  }

  // If we need a boolean result out of CAS, set the flag appropriately and promote the result.
  // This would be the final result if we do not go slow.
  if (!exchange) {
    assert(res != noreg, "need result register");
    __ setcc(Assembler::equal, res);
  } else {
    assert(res == noreg, "no result expected");
  }

  if (ShenandoahCASBarrier) {
    ShenandoahCASBarrierSlowStubC2* const stub =
      ShenandoahCASBarrierSlowStubC2::create(node, addr, oldval, newval, res, tmp1, tmp2, exchange);
    if (res != noreg) {
      stub->dont_preserve(res); // set at the end, no need to save
    }
    stub->dont_preserve(oldval); // saved explicitly
    stub->dont_preserve(tmp1);   // temp, no need to save
    stub->dont_preserve(tmp2);   // temp, no need to save

    // On success, we do not need any additional handling.
    __ jccb(Assembler::equal, *stub->continuation());

    // If GC is in progress, it is likely we need additional handling for false negatives.
    // Slow stub re-enters with result set correctly.
    gc_state_check_c2(masm, ShenandoahHeap::HAS_FORWARDED, stub);
    __ bind(*stub->continuation());
  }
}

#undef __
#define __ masm.

void ShenandoahLoadBarrierStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  assert_different_registers(_tmp, _dst);

  Label L_end;

  // If the object is null, there is no point in applying barriers.
  if (_narrow) {
    __ testl(_dst, _dst);
  } else {
    __ testptr(_dst, _dst);
  }
  __ jcc(Assembler::equal, *continuation());

  // If the object is narrow, we need to decode it first.
  if (_narrow) {
    __ decode_heap_oop_not_null(_dst);
  }

  if (_needs_load_ref_barrier) {
    Label L_lrb_done;

    bool is_weak = (_node->barrier_data() & ShenandoahBarrierStrong) == 0;

    // Runtime check for LRB
    Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | (is_weak ? ShenandoahHeap::WEAK_ROOTS : 0));
    __ jcc(Assembler::zero, L_lrb_done);

    // Weak/phantom loads always need to go to runtime.
    if (!is_weak) {
      __ movptr(_tmp, _dst);
      __ shrptr(_tmp, ShenandoahHeapRegion::region_size_bytes_shift_jint());
      __ addptr(_tmp, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
      __ testb(Address(_tmp, 0), 0xFF);
      __ jcc(Assembler::zero, L_lrb_done);
    }

    dont_preserve(_dst); // For LRB we must not preserve _dst
    {
      SaveLiveRegisters save_registers(&masm, this);

      // Shuffle in the arguments. The end result should be:
      // c_rarg0 <-- _dst
      // c_rarg1 <-- lea(_src)
      if (c_rarg0 == _dst) {
        __ lea(c_rarg1, _src);
      } else if (c_rarg1 == _dst) {
        // Set up arguments in reverse, and then flip them
        __ lea(c_rarg0, _src);
        __ xchgptr(c_rarg0, c_rarg1);
      } else {
        assert_different_registers(c_rarg1, _dst);
        __ lea(c_rarg1, _src);
        __ movptr(c_rarg0, _dst);
      }
      address entry = nullptr;
      if (_narrow) {
        if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
          entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
        } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
          entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
        } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
          entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
        }
      } else {
        if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
          entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
        } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
          entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
        } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
          entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
        }
      }
      assert(entry != nullptr, "barrier strength must select an entry point");
      __ call(RuntimeAddress(entry), rax);
      assert(!save_registers.contains(_dst), "must not save result register");
      __ movptr(_dst, rax);
    }

    // Paranoia: if LRB returns null for a weak access, do NOT feed it into SATB, which does not accept null pointers.
    __ testptr(_dst, _dst);
    __ jcc(Assembler::equal, L_end);

    __ bind(L_lrb_done);
  }

  if (_needs_satb_barrier) {
    // Push obj to SATB, if needed.

    Label L_satb_done, L_satb_runtime;

    // Runtime check for SATB
    Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    __ testb(gc_state, ShenandoahHeap::MARKING);
    __ jcc(Assembler::zero, L_satb_done);

    // Can we store a value in the given thread's buffer?
    // (The index field is typed as size_t.)
    Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
    Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

    __ movptr(_tmp, index);
    __ testptr(_tmp, _tmp);
    __ jccb(Assembler::zero, L_satb_runtime);
    // The buffer is not full, store value into it.
    __ subptr(_tmp, wordSize);
    __ movptr(index, _tmp);
    __ addptr(_tmp, buffer);
    __ movptr(Address(_tmp, 0), _dst);
    __ jmp(L_satb_done);

    __ bind(L_satb_runtime);

    preserve(_dst); // For SATB we must preserve _dst
    {
      SaveLiveRegisters save_registers(&masm, this);
      if (c_rarg0 != _dst) {
        __ mov(c_rarg0, _dst);
      }
      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
    }

    __ bind(L_satb_done);
  }

  __ bind(L_end);
  if (_narrow) {
    __ encode_heap_oop(_dst);
  }

  __ jmp(*continuation());
}

void ShenandoahStoreBarrierStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  Label L_runtime, L_preval_null;

  // We need 2 temp registers for this code to work.
  // _tmp is already allocated and will carry preval for the call.
  // Allocate the other one now.
  Register tmp2 = noreg;
  for (int i = 0; i < 8; i++) {
    Register r = as_Register(i);
    if (r != rsp && r != rbp && r != _src && r != _tmp) {
      tmp2 = r;
      break;
    }
  }

  assert(tmp2 != noreg, "tmp2 allocated");
  assert_different_registers(_tmp, tmp2, _src);

  Register preval = _tmp;
  Register slot = tmp2;

  // Load value from memory
  if (_dst_narrow) {
    __ movl(preval, _dst);
  } else {
    __ movq(preval, _dst);
  }

  // Is the previous value null?
  __ cmpptr(preval, NULL_WORD);
  __ jccb(Assembler::equal, L_preval_null);

  if (_dst_narrow) {
    __ decode_heap_oop_not_null(preval);
  }

  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  __ push(tmp2);
  __ movptr(slot, index);
  __ testptr(slot, slot);
  __ jccb(Assembler::zero, L_runtime);
  // The buffer is not full, store value into it.
  __ subptr(slot, wordSize);
  __ movptr(index, slot);
  __ addptr(slot, buffer);
  __ movptr(Address(slot, 0), preval);

  // Pop temps and exit
  __ pop(tmp2);
  __ bind(L_preval_null);
  __ jmp(*continuation());

  __ bind(L_runtime);
  __ pop(tmp2);
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != preval) {
      __ mov(c_rarg0, preval);
    }
    // rax is a caller-saved, non-argument-passing register, so it does not
    // interfere with c_rarg0 or c_rarg1. If it contained any live value before
    // entering this stub, it is saved at this point, and restored after the
    // call. If it did not contain any live value, it is free to be used. In
    // either case, it is safe to use it here as a call scratch register.
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
  }
  __ jmp(*continuation());
}

void ShenandoahLoadRefBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());

  Register obj = _obj;
  if (_narrow) {
    __ movl(_tmp1, _obj);
    __ decode_heap_oop(_tmp1);
    obj = _tmp1;
  }

  // Weak/phantom loads always need to go to runtime.
  if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
    __ movptr(_tmp2, obj);
    __ shrptr(_tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(_tmp3, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(_tmp2, Address(_tmp2, _tmp3, Address::times_1));
    __ testbool(_tmp2);
    __ jcc(Assembler::zero, *continuation());
  }

  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != obj) {
      if (c_rarg0 == _addr) {
        __ movptr(_tmp2, _addr);
        _addr = _tmp2;
      }
      __ movptr(c_rarg0, obj);
    }
    if (c_rarg1 != _addr) {
      __ movptr(c_rarg1, _addr);
    }

    address entry = nullptr;
    if (_narrow) {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
      }
    } else {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
      }
    }
    assert(entry != nullptr, "barrier strength must select an entry point");
    __ call(RuntimeAddress(entry), rax);
    assert(!save_registers.contains(_obj), "must not save result register");
    __ movptr(_obj, rax);
  }
  if (_narrow) {
    __ encode_heap_oop(_obj);
  }

  __ jmp(*continuation());
}

void ShenandoahSATBBarrierStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());
  Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label runtime;

  // Do we need to load the previous value?
  if (_addr != noreg) {
    __ load_heap_oop(_preval, Address(_addr, 0), noreg, AS_RAW);
  }
  // Is the previous value null?
  __ cmpptr(_preval, NULL_WORD);
  __ jcc(Assembler::equal, *continuation());

  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  __ movptr(_tmp, index);
  __ testptr(_tmp, _tmp);
  __ jccb(Assembler::zero, runtime);
  // The buffer is not full, store value into it.
  __ subptr(_tmp, wordSize);
  __ movptr(index, _tmp);
  __ addptr(_tmp, buffer);
  __ movptr(Address(_tmp, 0), _preval);

  __ jmp(*continuation());

  __ bind(runtime);
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != _preval) {
      __ mov(c_rarg0, _preval);
    }
    // rax is a caller-saved, non-argument-passing register, so it does not
    // interfere with c_rarg0 or c_rarg1. If it contained any live value before
    // entering this stub, it is saved at this point, and restored after the
    // call. If it did not contain any live value, it is free to be used. In
    // either case, it is safe to use it here as a call scratch register.
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
  }
  __ jmp(*continuation());
}

void ShenandoahCASBarrierSlowStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  // CAS has failed because the value held at addr does not match expected.
  // This may be a false negative, because the version in memory might be
  // the from-space copy of the same object that we currently hold a
  // to-space reference for.
  //
  // To resolve this, we need to pass the location through the LRB fixup,
  // which makes sure that the location holds only to-space pointers.
  // To avoid calling into the runtime too often, we cset-check the object first.
  // We could inline most of the work here, but there is little point,
  // as CAS failures over cset locations must be rare. This fast-slow split
  // matches what we do for the normal LRB.

  assert(_expected == rax, "expected must be rax");

  // Non-strong references should always go to runtime. We do not expect
  // CASes over non-strong locations.
  assert((_node->barrier_data() & ShenandoahBarrierStrong) != 0, "Only strong references for CASes");

  Label L_final;

  // Fast-path stashed original oldval to tmp2 for us. We need to save it
  // for the final retry. This frees up tmp2 for cset check below.
  __ push(_tmp2);

  // (Compressed) failure witness is in _expected.
  // Unpack it and check if it is in collection set.
  __ movptr(_tmp1, _expected);
  if (UseCompressedOops) {
    __ decode_heap_oop(_tmp1);
  }
  __ shrptr(_tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ movptr(_tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
  __ movbool(_tmp1, Address(_tmp1, _tmp2, Address::times_1));
  __ testbool(_tmp1);
  __ jcc(Assembler::zero, L_final);

  {
    SaveLiveRegisters save_registers(&masm, this);
    // Load up failure witness again.
    if (c_rarg0 != _expected) {
      __ movptr(c_rarg0, _expected);
    }
    if (UseCompressedOops) {
      __ decode_heap_oop(c_rarg0);
    }
    __ lea(c_rarg1, _addr);

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), 2);
    }
    // We have called LRB to fix up the heap location. We do not care about its result,
    // as we will just try to CAS the location again.
  }

  __ bind(L_final);

  // Try to CAS again with the original expected value.
  // At this point, there can no longer be false negatives.
  __ pop(_expected);
  __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(_new_val, _addr);
  } else {
    __ cmpxchgptr(_new_val, _addr);
  }
  if (!_cae) {
    assert(_result != noreg, "need result register");
    __ setcc(Assembler::equal, _result);
  } else {
    assert(_result == noreg, "no result expected");
  }
  __ jmp(*continuation());
}

#undef __
#define __ masm->
#endif // COMPILER2

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count,
                                                                     Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  Label L_loop, L_done;
  const Register end = count;
  assert_different_registers(addr, end);

  // Zero count? Nothing to do.
  __ testl(count, count);
  __ jccb(Assembler::zero, L_done);

  const Register thread = r15_thread;
  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);

  __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
  __ subptr(end, BytesPerHeapOop);                  // end - 1 to make inclusive
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> cards count

  __ addptr(addr, tmp);
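  // addr now points at the first card byte; count (aliased to end) holds the
  // number of cards minus one, so the loop below dirties every covered card.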

  __ BIND(L_loop);
  __ movb(Address(addr, count, Address::times_1), 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);

  __ BIND(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object being in the collection set.
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = r15_thread;
  const Register tmp = rdx;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), rcx);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
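  // Native (off-heap) references are never compressed, which is why the strong
  // native path below takes the non-narrow entry even when UseCompressedOops
  // is enabled.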

  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1