/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

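// Helpers to preserve machine state around the runtime calls emitted by the
// barriers below. Calls to save_machine_state and restore_machine_state must
// be paired with matching handle_gpr/handle_fp arguments. As a rough sketch
// (assuming both flags are set), the stack below the caller's frame looks like:
//
//   [ integer state pushed by push_IU_state ]
//   [ xmm7 ] [ xmm6 ] ... [ xmm1 ] [ xmm0 ]   <-- rsp (8 slots, 16 bytes each)
//
// Only the 8 Java FP argument registers are saved; the exact GPR layout is
// whatever push_IU_state emits.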
static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
    assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call");

    const int xmm_size = wordSize * 2;
    __ subptr(rsp, xmm_size * 8);
    __ movdbl(Address(rsp, xmm_size * 0), xmm0);
    __ movdbl(Address(rsp, xmm_size * 1), xmm1);
    __ movdbl(Address(rsp, xmm_size * 2), xmm2);
    __ movdbl(Address(rsp, xmm_size * 3), xmm3);
    __ movdbl(Address(rsp, xmm_size * 4), xmm4);
    __ movdbl(Address(rsp, xmm_size * 5), xmm5);
    __ movdbl(Address(rsp, xmm_size * 6), xmm6);
    __ movdbl(Address(rsp, xmm_size * 7), xmm7);
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    const int xmm_size = wordSize * 2;
    __ movdbl(xmm0, Address(rsp, xmm_size * 0));
    __ movdbl(xmm1, Address(rsp, xmm_size * 1));
    __ movdbl(xmm2, Address(rsp, xmm_size * 2));
    __ movdbl(xmm3, Address(rsp, xmm_size * 3));
    __ movdbl(xmm4, Address(rsp, xmm_size * 4));
    __ movdbl(xmm5, Address(rsp, xmm_size * 5));
    __ movdbl(xmm6, Address(rsp, xmm_size * 6));
    __ movdbl(xmm7, Address(rsp, xmm_size * 7));
    __ addptr(rsp, xmm_size * 8);
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {
    if (ShenandoahCardBarrier) {
      bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
      bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
      bool obj_int = (type == T_OBJECT) && UseCompressedOops;

      // We need to save the original element count because the array copy stub
      // will destroy the value and we need it for the card marking barrier.
      if (!checkcast) {
        if (!obj_int) {
          // Save count for barrier
          __ movptr(r11, count);
        } else if (disjoint) {
          // Save dst in r11 in the disjoint case
          __ movq(r11, dst);
        }
      }
    }

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
      Register thread = r15_thread;
      assert_different_registers(src, dst, count, thread);

      Label L_done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, L_done);

      // Avoid runtime call when not active.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
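      // An uninitialized destination has no previous values to log, so the
      // SATB barrier can be skipped there: only evacuation (HAS_FORWARDED)
      // forces the runtime call. Otherwise the call is also needed while
      // marking is active.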
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, L_done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(L_done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  if (ShenandoahCardBarrier && is_reference_type(type)) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
    bool obj_int = (type == T_OBJECT) && UseCompressedOops;
    Register tmp = rax;

    if (!checkcast) {
      if (!obj_int) {
        // Use the count saved by the prologue
        count = r11;
      } else if (disjoint) {
        // Use the saved dst in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
  }
}

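// Emits the SATB pre-barrier fast path inline. In pseudocode, the emitted
// sequence is roughly (a sketch, not the exact code):
//
//   if (gc_state & MARKING) {
//     if (obj != noreg) pre_val = *obj;       // load previous value if needed
//     if (pre_val != null) {
//       if (satb_index == 0) {
//         runtime_write_barrier_pre(pre_val); // slow path: buffer is full
//       } else {
//         satb_index -= wordSize;
//         satb_buffer[satb_index] = pre_val;  // record in thread-local buffer
//       }
//     }
//   }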
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  const Register thread = r15_thread;

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);             // tmp := *index_adr
  __ cmpptr(tmp, 0);                 // tmp == 0?
  __ jcc(Assembler::equal, runtime); // If yes, goto runtime

  __ subptr(tmp, wordSize);          // tmp := tmp - wordSize
  __ movptr(index, tmp);             // *index_adr := tmp
  __ addptr(tmp, buffer);            // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);
  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy the thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), 1);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
  }

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

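// Applies the load-reference barrier to the oop in dst, which was loaded from
// src. In pseudocode the fast path is roughly (a sketch; the emitted code
// keeps everything in the registers allocated below):
//
//   if (gc_state & (HAS_FORWARDED | (is_strong ? 0 : WEAK_ROOTS))) {
//     if (!is_strong || in_collection_set(dst)) {
//       dst = runtime_lrb(dst, &src);   // may also heal the src location
//     }
//   }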
void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
  Register thread = r15_thread;

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
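    // The region index is dst >> region_size_bytes_shift; in_cset_fast_test_addr()
    // points to a byte map with one byte per heap region, non-zero when that
    // region is in the collection set.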
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // The rest is saved with the optimized path

  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // Save APX extended registers r16-r31 if enabled
  if (UseAPX) {
    __ movptr(Address(rsp, (--slot) * wordSize), r16);
    __ movptr(Address(rsp, (--slot) * wordSize), r17);
    __ movptr(Address(rsp, (--slot) * wordSize), r18);
    __ movptr(Address(rsp, (--slot) * wordSize), r19);
    __ movptr(Address(rsp, (--slot) * wordSize), r20);
    __ movptr(Address(rsp, (--slot) * wordSize), r21);
    __ movptr(Address(rsp, (--slot) * wordSize), r22);
    __ movptr(Address(rsp, (--slot) * wordSize), r23);
    __ movptr(Address(rsp, (--slot) * wordSize), r24);
    __ movptr(Address(rsp, (--slot) * wordSize), r25);
    __ movptr(Address(rsp, (--slot) * wordSize), r26);
    __ movptr(Address(rsp, (--slot) * wordSize), r27);
    __ movptr(Address(rsp, (--slot) * wordSize), r28);
    __ movptr(Address(rsp, (--slot) * wordSize), r29);
    __ movptr(Address(rsp, (--slot) * wordSize), r30);
    __ movptr(Address(rsp, (--slot) * wordSize), r31);
  }
  // r12-r15 are callee saved in all calling conventions
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
  Register arg0 = c_rarg0, arg1 = c_rarg1;
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

  // Restore APX extended registers r31-r16 if previously saved
  if (UseAPX) {
    __ movptr(r31, Address(rsp, (slot++) * wordSize));
    __ movptr(r30, Address(rsp, (slot++) * wordSize));
    __ movptr(r29, Address(rsp, (slot++) * wordSize));
    __ movptr(r28, Address(rsp, (slot++) * wordSize));
    __ movptr(r27, Address(rsp, (slot++) * wordSize));
    __ movptr(r26, Address(rsp, (slot++) * wordSize));
    __ movptr(r25, Address(rsp, (slot++) * wordSize));
    __ movptr(r24, Address(rsp, (slot++) * wordSize));
    __ movptr(r23, Address(rsp, (slot++) * wordSize));
    __ movptr(r22, Address(rsp, (slot++) * wordSize));
    __ movptr(r21, Address(rsp, (slot++) * wordSize));
    __ movptr(r20, Address(rsp, (slot++) * wordSize));
    __ movptr(r19, Address(rsp, (slot++) * wordSize));
    __ movptr(r18, Address(rsp, (slot++) * wordSize));
    __ movptr(r17, Address(rsp, (slot++) * wordSize));
    __ movptr(r16, Address(rsp, (slot++) * wordSize));
  }
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9, Address(rsp, (slot++) * wordSize));
  __ movptr(r8, Address(rsp, (slot++) * wordSize));
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");
}

//
// Arguments:
//
// Inputs:
//   src: oop location, might be clobbered
//   tmp1: scratch register, might not be valid.
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    assert_different_registers(dst, tmp1, r15_thread);
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    shenandoah_write_barrier_pre(masm /* masm */,
                                 noreg /* obj */,
                                 dst /* pre_val */,
                                 tmp1 /* tmp */,
                                 true /* tosca_live */,
                                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

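// Card-marking store check. In effect (a sketch):
//
//   card_table_base[obj >> card_shift] = dirty_card_val();
//
// With UseCondCardMark the card is read first and the store is skipped when
// it is already dirty, which reduces false sharing on the card table.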
void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  __ shrptr(obj, CardTable::card_shift());

  // Use the thread register as the TLS base; tmp will hold the card table
  // (byte map) base loaded from the thread-local slot.
  Register thread = r15_thread;
  Register tmp = rscratch1;

  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, obj, Address::times_1);

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

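// oop store with the full Shenandoah sequence: SATB pre-barrier on the
// previous value, the actual store, then card marking. Roughly (a sketch):
//
//   if (marking) satb_enqueue(*dst);   // pre-barrier, logs previous value
//   *dst = val;
//   if (ShenandoahCardBarrier && val != null) dirty_card_for(dst);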
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool on_oop = is_reference_type(type);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    // Flatten the object address if needed. We do this regardless of precise
    // card marks because we need the flattened address in tmp1 either way.
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

    assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /*masm*/,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }

    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    if (val != noreg) {
      if (ShenandoahCardBarrier) {
        store_check(masm, tmp1);
      }
    }
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}

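// Resolve a jobject from native code. A null result can be returned directly;
// otherwise, while evacuation is in progress the resolved oop may still be a
// from-space copy, so we take the slow path to apply the barrier there.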
void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
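//
// In pseudocode, the sequence below is roughly (a sketch):
//
//   if (CAS(addr, oldval, newval)) goto success;        // Step 1
//   witness = value the failed CAS loaded into rax
//   if (witness == null || heap is stable) goto failure;
//   if (!is_forwarded(witness)) goto failure;           // Step 2
//   fwd = forwardee(witness);
//   if (fwd != oldval) goto failure;                    // legitimate failure
//   CAS(addr, witness, fwd);                            // Step 3: fix memory, result ignored
//   oldval = fwd;
//   if (CAS(addr, oldval, newval)) goto success;        // Step 4
//   goto failure;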
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS had failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  // Before reaching for the resolve sequence, see if we can avoid the whole
  // shebang with filters.

  // Filter: when the offending in-memory value is null, the failure is definitely legitimate
  __ testptr(oldval, oldval);
  __ jcc(Assembler::zero, L_failure);

  // Filter: when the heap is stable, the failure is definitely legitimate
  const Register thread = r15_thread;
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

  // Decode offending in-memory value.
  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }

  // Test if-forwarded
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);     // When it is 00, then also not forwarded

  // Load and mask forwarding pointer
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);

  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }

  // Now we have the forwarded offender in tmp2.
  // Compare and if they don't match, we have a legitimate failure
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have the from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with a to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.

  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and the memory pointer is to-space as well. Nothing is able to store
  // a from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else {
    __ movptr(oldval, tmp2);
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }
  // Step 5. If we need a boolean result out of CAS, set the flag appropriately,
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)

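// Dirty all cards covering the heap words [addr, addr + count * heapOopSize - 1].
// The loop below stores 0 into each card, which assumes
// CardTable::dirty_card_val() == 0 (the invariant this sequence relies on).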
void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count,
                                                                     Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  Label L_loop, L_done;
  const Register end = count;
  assert_different_registers(addr, end);

  // Zero count? Nothing to do.
  __ testl(count, count);
  __ jccb(Assembler::zero, L_done);

  const Register thread = r15_thread;
  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);

  __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
  __ subptr(end, BytesPerHeapOop);                  // end - 1 to make inclusive
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> cards count

  __ addptr(addr, tmp);

  __ BIND(L_loop);
  __ movb(Address(addr, count, Address::times_1), 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);

  __ BIND(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

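// C1 slow-path stubs. Each stub passes its arguments to the shared runtime
// code blob via store_parameter and jumps back to the fast path at
// stub->continuation(). The blobs themselves are generated once, by the
// generate_c1_*_runtime_stub functions further down.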
void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object being in the collection set.
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

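// Shared C1 runtime blob for the SATB pre-barrier. This mirrors the slow path
// of satb_write_barrier_pre above: try the thread-local SATB buffer first,
// and only call into the runtime when the buffer is full.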
void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = r15_thread;
  const Register tmp = rdx;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), rcx);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1