/*
 * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "opto/output.hpp"
#include "utilities/population_count.hpp"
#include "utilities/powerOfTwo.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ beqz(count, done);

      // Is GC active?
      Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      assert_different_registers(src, dst, count, t0);

      assert(!saved_regs.contains(t0), "Sanity: about to clobber t0");

      __ lbu(t0, gc_state);
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ test_bit(t0, t0, ShenandoahHeap::HAS_FORWARDED_BITPOS);
        __ beqz(t0, done);
      } else {
        __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ beqz(t0, done);
      }

      __ push_call_clobbered_registers();
      // If arguments are not in proper places, shuffle them.
      // Doing this via the stack is the most straight-forward way to avoid
      // accidentally smashing any register.
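      // Note: pushing src, dst, count (count ends up on top) and popping into
      // c_rarg2, c_rarg1, c_rarg0 in that order realizes the permutation
      // (src, dst, count) -> (c_rarg0, c_rarg1, c_rarg2) even if the two
      // register sets overlap.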
      if (c_rarg0 != src || c_rarg1 != dst || c_rarg2 != count) {
        __ push_reg(RegSet::of(src), sp);
        __ push_reg(RegSet::of(dst), sp);
        __ push_reg(RegSet::of(count), sp);
        __ pop_reg(RegSet::of(c_rarg2), sp);
        __ pop_reg(RegSet::of(c_rarg1), sp);
        __ pop_reg(RegSet::of(c_rarg0), sp);
      }
      address target = nullptr;
      if (UseCompressedOops) {
        target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop);
      } else {
        target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop);
      }
      __ call_VM_leaf(target, 3);
      __ pop_call_clobbered_registers();
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register count, Register tmp) {
  if (ShenandoahCardBarrier && is_oop) {
    gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp1,
                                                 Register tmp2) {
  assert(ShenandoahSATBBarrier, "Should be checked by caller");
  assert(thread == xthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lbu(t1, gc_state);
  __ test_bit(t1, t1, ShenandoahHeap::MARKING_BITPOS);
  __ beqz(t1, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    if (UseCompressedOops) {
      __ lwu(pre_val, Address(obj, 0));
      __ decode_heap_oop(pre_val);
    } else {
      __ ld(pre_val, Address(obj, 0));
    }
  }

  // Is the previous value null?
  __ beqz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
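  // Fast path sketch: the SATB buffer fills downwards, and index holds the
  // byte offset of the next free slot. If index != 0, step it down by one
  // word and store pre_val at buffer + index; if index == 0 the buffer is
  // full and the runtime call below hands pre_val to the SATB queue instead.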
  __ ld(tmp1, index);            // tmp := *index_adr
  __ beqz(tmp1, runtime);        // tmp == 0? If yes, goto runtime

  __ subi(tmp1, tmp1, wordSize); // tmp := tmp - wordSize
  __ sd(tmp1, index);            // *index_adr := tmp
  __ ld(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);      // tmp := tmp + *buffer_adr

  // Record the previous value
  __ sd(pre_val, Address(tmp1, 0));
  __ j(done);

  // Slow-path call.
  __ bind(runtime);
  __ enter();
  __ push_call_clobbered_registers();
  if (c_rarg0 != pre_val) {
    __ mv(c_rarg0, pre_val);
  }
  // Calling super_call_VM_leaf with c_rarg0 bypasses interpreter checks and avoids extra argument moves.
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
  __ pop_call_clobbered_registers();
  __ leave();

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");

  Label is_null;
  __ beqz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even t0 and t1, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks if the lowest two bits are
  // set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. It is done here by:
  //  - Inverting the mark word
  //  - Testing whether the lowest two bits == 0
  //  - If so, setting the lowest two bits
  //  - Inverting the result back, and copying it to dst
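  // Worked example (assuming the usual Shenandoah forwarding encoding, where a
  // forwarded object's mark word holds the to-space address with both low bits
  // set to markWord::marked_value): mark = fwd | 0b11. Inverted, ~mark has low
  // bits 0b00, so the andi/bnez test below falls through; or-ing 0b11 back in
  // and inverting again yields exactly fwd, which lands in dst. For a
  // non-forwarded mark the low bits are not both set, the inverted value has a
  // non-zero low pair, and dst is left untouched.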
  RegSet saved_regs = RegSet::of(t2);
  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = t0;
    if (tmp == dst) {
      tmp = t1;
    }
    saved_regs += RegSet::of(tmp);
  }

  assert_different_registers(tmp, dst, t2);
  __ push_reg(saved_regs, sp);

  Label done;
  __ ld(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ xori(tmp, tmp, -1); // invert all bits (xori with -1 == bitwise NOT)
  __ andi(t2, tmp, markWord::lock_mask_in_place);
  __ bnez(t2, done);
  __ ori(tmp, tmp, markWord::marked_value);
  __ xori(dst, tmp, -1); // invert back into dst (xori with -1 == bitwise NOT)
  __ bind(done);

  __ pop_reg(saved_regs, sp);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
                                                           Register dst,
                                                           Address load_addr,
                                                           DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != t1 && load_addr.base() != t1, "need t1");
  assert_different_registers(load_addr.base(), t0, t1);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;
  Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lbu(t1, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ test_bit(t1, t1, ShenandoahHeap::HAS_FORWARDED_BITPOS);
    __ beqz(t1, heap_stable);
  } else {
    Label lrb;
    __ test_bit(t0, t1, ShenandoahHeap::WEAK_ROOTS_BITPOS);
    __ bnez(t0, lrb);
    __ test_bit(t0, t1, ShenandoahHeap::HAS_FORWARDED_BITPOS);
    __ beqz(t0, heap_stable);
    __ bind(lrb);
  }

  // use x11 for load address
  Register result_dst = dst;
  if (dst == x11) {
    __ mv(t1, dst);
    dst = t1;
  }

  // Save x10 and x11, unless it is an output register
  RegSet saved_regs = RegSet::of(x10, x11) - result_dst;
  __ push_reg(saved_regs, sp);
  __ la(x11, load_addr);
  __ mv(x10, dst);

  // Test for in-cset
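  // The cset membership test indexes a byte map by region number:
  // in_cset_fast_test_addr()[oop >> region_size_bytes_shift]. Bit 0 of that
  // byte is set when the region is in the collection set.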
  if (is_strong) {
    __ mv(t1, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(t0, x10, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(t1, t1, t0);
    __ lbu(t1, Address(t1));
    __ test_bit(t0, t1, 0);
    __ beqz(t0, not_cset);
  }

  // Slow-path call
  __ enter();
  __ push_call_clobbered_registers();
  address target = nullptr;
  if (is_strong) {
    if (is_narrow) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    }
  } else if (is_weak) {
    if (is_narrow) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  // Calling super_call_VM_leaf with c_rarg0/1 bypasses interpreter checks and avoids extra argument moves.
  __ super_call_VM_leaf(target, c_rarg0, c_rarg1);
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);
  __ leave();

  __ bind(not_cset);
  __ mv(result_dst, x10);
  __ pop_reg(saved_regs, sp);

  __ bind(heap_stable);
}

//
// Arguments:
//
// Inputs:
//   src: oop location to load from, might be clobbered
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   x28/x29 (tmp regs)
//
// Alias:
//   dst: x28 or x29 (might use x28/x29 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
                                            DecoratorSet decorators,
                                            BasicType type,
                                            Register dst,
                                            Address src,
                                            Register tmp1,
                                            Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    RegSet saved_regs;
    if (dst == src.base()) {
      dst = (src.base() == x28) ? x29 : x28;
      saved_regs = RegSet::of(dst);
      __ push_reg(saved_regs, sp);
    }
    assert_different_registers(dst, src.base());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mv(result_dst, dst);
      dst = result_dst;
    }

    if (saved_regs.bits() != 0) {
      __ pop_reg(saved_regs, sp);
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    satb_barrier(masm /* masm */,
                 noreg /* obj */,
                 dst /* pre_val */,
                 xthread /* thread */,
                 tmp1 /* tmp1 */,
                 tmp2 /* tmp2 */);
  }
}

void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  __ srli(obj, obj, CardTable::card_shift());

  assert(CardTable::dirty_card_val() == 0, "must be");

  Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ld(t1, curr_ct_holder_addr);
  __ add(t1, obj, t1);
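  // t1 now addresses the card for obj: the thread-local field supplies the
  // current card table base, and (obj >> card_shift) is the card index.
  // Note that obj itself has been clobbered by the shift above.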

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ lbu(t0, Address(t1));
    __ beqz(t0, L_already_dirty);
    __ sb(zr, Address(t1));
    __ bind(L_already_dirty);
  } else {
    __ sb(zr, Address(t1));
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  // 1: non-reference types require no barriers
  if (!is_reference_type(type)) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // Flatten object address right away for simplicity: likely needed by barriers
  if (dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mv(tmp3, dst.base());
    }
  } else {
    __ la(tmp3, dst);
  }
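  // tmp3 now holds the exact field address; the SATB pre-barrier below loads
  // the old value through it, the store itself uses Address(tmp3, 0), and the
  // card post-barrier derives the card from it.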

  // 2: pre-barrier: SATB needs the previous value
  if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
    satb_barrier(masm,
                 tmp3 /* obj */,
                 tmp2 /* pre_val */,
                 xthread /* thread */,
                 tmp1 /* tmp */,
                 t0 /* tmp2 */);
  }

  // Store!
  BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);

  // 3: post-barrier: card barrier needs store address
  bool storing_non_null = (val != noreg);
  if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
    card_barrier(masm, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ beqz(obj, done);

  assert(obj != t1, "need t1");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lbu(t1, gc_state);

  // Check for heap in evacuation phase
  __ test_bit(t0, t1, ShenandoahHeap::EVACUATION_BITPOS);
  __ bnez(t0, slowpath);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::try_peek_weak_handle_in_nmethod(MacroAssembler *masm, Register weak_handle,
                                                                    Register obj, Register tmp, Label& slow_path) {
  assert_different_registers(weak_handle, tmp, noreg);
  assert_different_registers(obj, tmp, noreg);

  Label done;

  // Peek weak handle using the standard implementation.
  BarrierSetAssembler::try_peek_weak_handle_in_nmethod(masm, weak_handle, obj, tmp, slow_path);

  // Check if the reference is null, and if it is, take the fast path.
  __ beqz(obj, done);

  Address gc_state(xthread, ShenandoahThreadLocalData::gc_state_offset());
  __ lbu(tmp, gc_state);

  // Check if the heap is under weak-reference/roots processing, in
  // which case we need to take the slow path.
  __ test_bit(tmp, tmp, ShenandoahHeap::WEAK_ROOTS_BITPOS);
  __ bnez(tmp, slow_path);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected. There are two
// races that must be addressed:
//   a) A parallel thread may mutate the contents of addr so that it points
//      to a different object. In this case, the CAS operation should fail.
//   b) A parallel thread may heal the contents of addr, replacing a
//      from-space pointer held in addr with the to-space pointer
//      representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success. If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers t0, t1
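//
// The emitted sequence is roughly equivalent to the following pseudocode
// (a sketch; narrow-oop encode/decode steps are omitted):
//
//   retry:
//     cur = CAS(addr, expected, new_val);
//     if (cur == expected) goto success;
//     fwd = resolve_forwardee(cur);
//     if (fwd != expected) goto fail;          // a genuinely different object
//     // cur was a stale from-space pointer to the same object: CAS it out
//     if (CAS(addr, cur, new_val) != cur) goto retry;
//   success:
//     result = is_cae ? expected : 1;  goto done;
//   fail:
//     result = is_cae ? fwd : 0;
//   done: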
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                Assembler::Aqrl acquire,
                                                Assembler::Aqrl release,
                                                bool is_cae,
                                                Register result) {
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::uint32 : Assembler::int64;

  assert_different_registers(addr, expected, t0, t1);
  assert_different_registers(addr, new_val, t0, t1);

  Label retry, success, fail, done;

  __ bind(retry);

  // Step1: Try to CAS.
  __ cmpxchg(addr, expected, new_val, size, acquire, release, /* result */ t1);

  // If success, then we are done.
  __ beq(expected, t1, success);

  // Step2: CAS failed, check the forwarded pointer.
  __ mv(t0, t1);

  if (is_narrow) {
    __ decode_heap_oop(t0, t0);
  }
  resolve_forward_pointer(masm, t0);

  __ encode_heap_oop(t0, t0);

  // Report failure when the forwarded oop was not expected.
  __ bne(t0, expected, fail);

  // Step 3: CAS again using the forwarded oop.
  __ cmpxchg(addr, t1, new_val, size, acquire, release, /* result */ t0);

  // Retry when failed.
  __ bne(t0, t1, retry);

  __ bind(success);
  if (is_cae) {
    __ mv(result, expected);
  } else {
    __ mv(result, 1);
  }
  __ j(done);

  __ bind(fail);
  if (is_cae) {
    __ mv(result, t0);
  } else {
    __ mv(result, zr);
  }

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register tmp) {
  assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");

  Label L_loop, L_done;
  const Register end = count;

  // Zero count? Nothing to do.
  __ beqz(count, L_done);

  // end = start + count << LogBytesPerHeapOop
  // last element address to make inclusive
  __ shadd(end, count, start, tmp, LogBytesPerHeapOop);
  __ subi(end, end, BytesPerHeapOop);
  __ srli(start, start, CardTable::card_shift());
  __ srli(end, end, CardTable::card_shift());

  // number of cards to dirty, minus one (the loop below counts down to zero inclusively)
  __ sub(count, end, start);

  Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ld(tmp, curr_ct_holder_addr);
  __ add(start, start, tmp);

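  // The loop dirties the cards at start[count] down to start[0], i.e. the
  // inclusive card index range computed above, one byte store per card.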
  __ bind(L_loop);
  __ add(tmp, start, count);
  __ sb(zr, Address(tmp));
  __ subi(count, count, 1);
  __ bgez(count, L_loop);
  __ bind(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */);
  }
  __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
  ce->store_parameter(stub->pre_val()->as_register(), 0);
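  // The runtime stub generated by generate_c1_pre_barrier_runtime_stub() reads
  // this slot back with load_parameter(0, ...), so pre_val travels via the
  // C1 stub-parameter area rather than an argument register.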
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ j(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce,
                                                                    ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == x10, "result must arrive in x10");
  assert_different_registers(tmp1, tmp2, t0);

  if (res != obj) {
    __ mv(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mv(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(tmp2, tmp2, tmp1);
    __ lbu(tmp2, Address(tmp2));
    __ beqz(tmp2, *stub->continuation(), true /* is_far */);
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);

  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ j(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = x10;
  const Register thread = xthread;
  const Register tmp = t0;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lb(tmp, gc_state);
  __ test_bit(tmp, tmp, ShenandoahHeap::MARKING_BITPOS);
  __ beqz(tmp, done);

  // Can we store original value in the thread's buffer?
  __ ld(tmp, queue_index);
  __ beqz(tmp, runtime);

  __ subi(tmp, tmp, wordSize);
  __ sd(tmp, queue_index);
  __ ld(t1, buffer);
  __ add(tmp, tmp, t1);
  __ load_parameter(0, t1);
  __ sd(t1, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm,
                                                                                    DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, x10);
  __ load_parameter(1, x11);
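  // x10 now holds the object and x11 the load address; these line up with the
  // (oop, oop* / narrowOop*) signatures of the ShenandoahRuntime entry points
  // selected below, and the resolved oop comes back in x10.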

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  address target = nullptr;
  if (is_strong) {
    if (is_native) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    } else {
      if (UseCompressedOops) {
        target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else {
        target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ rt_call(target);
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);

  __ epilogue();
}

#undef __

#endif // COMPILER1

#ifdef COMPILER2

#undef __
#define __ masm.

int ShenandoahBarrierStubC2::available_gp_registers() {
  return Register::number_of_registers;
}

bool ShenandoahBarrierStubC2::is_special_register(Register r) {
  return r == fp || r == sp ||
         r == xheapbase || r == xthread ||
         r == t0 || r == t1 || r == zr;
}

void ShenandoahBarrierStubC2::enter_if_gc_state(MacroAssembler& masm, const char test_state, Register tmp) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);

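  // gc_state_fast_array_offset(test_state) appears to select a per-thread byte
  // that is precomputed to be non-zero iff (gc_state & test_state) != 0, which
  // lets a single lbu + beqz replace the load-mask-test sequence used by the
  // interpreter-time barriers above.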
  Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(test_state)));
  __ lbu(t0, gc_state_fast);
  __ beqz(t0, *continuation());
  __ j(*entry());

  // This is where the slow-path stub returns to, and where the code above
  // jumps when the gc-state check fails.
  __ bind(*continuation());
}

#undef __
#define __ masm->

void ShenandoahBarrierSetAssembler::compare_and_set_c2(const MachNode* node, MacroAssembler* masm, Register res, Register addr,
                                                       Register oldval, Register newval, Register tmp, bool exchange, bool narrow, bool is_acquire) {
  const Assembler::Aqrl acquire = is_acquire ? Assembler::aq : Assembler::relaxed;
  const Assembler::Aqrl release = Assembler::rl;

  // Pre-barrier covers several things:
  //  a. Avoids false negatives from the CAS encountering stale from-space values in memory.
  //  b. Satisfies the need for LRB for the CAE result.
  //  c. Records old value for the sake of SATB.
  //
  // (a) and (b) are covered because the load barrier does the memory location fixup.
  // (c) is covered by KA on the current memory value.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, Address(addr, 0), narrow, /* do_load: */ true);
    char check = 0;
    check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
    assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for CAS/CAE");
    stub->enter_if_gc_state(*masm, check);
  }

  // The existing RISC-V cmpxchg_oop above already handles Shenandoah's forwarded-value retry logic.
  // It returns:
  //  - boolean 0/1 for CAS (!exchange)
  //  - loaded/current value for CAE (exchange)
  ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, addr, oldval, newval, acquire, release, exchange /* is_cae */, res);

  // Post-barrier deals with card updates.
  card_barrier_c2(node, masm, Address(addr, 0));
}

void ShenandoahBarrierSetAssembler::get_and_set_c2(const MachNode* node, MacroAssembler* masm, Register preval,
                                                   Register newval, Register addr, Register tmp, bool is_acquire) {
  const bool is_narrow = node->bottom_type()->isa_narrowoop();

  // Pre-barrier covers several things:
  //  a. Satisfies the need for LRB for the GAS result.
  //  b. Records old value for the sake of SATB.
  //
  // (a) is covered because load barrier does memory location fixup.
  // (b) is covered by KA on the current memory value.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, Address(addr, 0), is_narrow, /* do_load: */ true);
    char check = 0;
    check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
    assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for GAS");
    stub->enter_if_gc_state(*masm, check);
  }

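  // Width tracks the oop encoding (wu for narrow, d for full); the *al
  // variants are assumed to add acquire/release ordering to the amoswap,
  // matching the is_acquire request, while the plain forms stay relaxed.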
  if (is_narrow) {
    if (is_acquire) {
      __ atomic_xchgalwu(preval, newval, addr);
    } else {
      __ atomic_xchgwu(preval, newval, addr);
    }
  } else {
    if (is_acquire) {
      __ atomic_xchgal(preval, newval, addr);
    } else {
      __ atomic_xchg(preval, newval, addr);
    }
  }

  // Post-barrier deals with card updates.
  card_barrier_c2(node, masm, Address(addr, 0));
}

void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm, Address dst, bool dst_narrow,
                                             Register src, bool src_narrow, Register tmp) {

  // Pre-barrier: SATB / keep-alive on current value in memory.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier(node), "Should not be required for stores");
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, dst, dst_narrow, /* do_load: */ true);
    stub->enter_if_gc_state(*masm, ShenandoahHeap::MARKING);
  }

  // Do the actual store
  if (dst_narrow) {
    if (!src_narrow) {
      // Need to encode into tmp, because we cannot clobber src.
      assert(tmp != noreg, "need temp register");
      if (ShenandoahBarrierStubC2::maybe_null(node)) {
        __ encode_heap_oop(tmp, src);
      } else {
        __ encode_heap_oop_not_null(tmp, src);
      }
      src = tmp;
    }
    __ sw(src, dst);
  } else {
    __ sd(src, dst);
  }

  // Post-barrier: card updates.
  card_barrier_c2(node, masm, dst);
}

void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm, Register dst, Address src, bool is_narrow) {
  // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
  if (is_narrow) {
    __ lwu(dst, src);
  } else {
    __ ld(dst, src);
  }
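  // The barrier work runs after the load: the stub re-checks gc_state and only
  // then takes the slow path, which may heal dst (see
  // ShenandoahBarrierStubC2::emit_code below); do_load is false here since dst
  // is already loaded.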

  // Post-barrier: LRB / KA / weak-root processing.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, dst, src, is_narrow, /* do_load: */ false);
    char check = 0;
    check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
    stub->enter_if_gc_state(*masm, check);
  }
}

void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm, Address address) {
  if (!ShenandoahBarrierStubC2::needs_card_barrier(node)) {
    return;
  }

  assert(CardTable::dirty_card_val() == 0, "must be");
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

  // t0 = card table base (holder)
  Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ld(t0, curr_ct_holder_addr);

  // t1 = effective address
  __ la(t1, address);

  // t1 = &card_table[ addr >> CardTable::card_shift() ] ; card index
  __ srli(t1, t1, CardTable::card_shift());
  __ add(t1, t1, t0);

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ lbu(t0, Address(t1));
    __ beqz(t0, L_already_dirty);
    __ sb(zr, Address(t1));
    __ bind(L_already_dirty);
  } else {
    __ sb(zr, Address(t1));
  }
}

#undef __
#define __ masm.

void ShenandoahBarrierStubC2::post_init() {
  // If we are in scratch emit mode we assume worst case,
  // and force the use of trampolines
  PhaseOutput* const output = Compile::current()->output();
  if (output->in_scratch_emit_size()) {
    return;
  }
}

void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  assert(_needs_keep_alive_barrier || _needs_load_ref_barrier, "Why are you here?");

  __ bind(*entry());

  // If we need to load ourselves, do it here.
  if (_do_load) {
    if (_narrow) {
      __ lwu(_obj, _addr);
    } else {
      __ ld(_obj, _addr);
    }
  }

  // If the object is null, there is no point in applying barriers.
  maybe_far_jump_if_zero(masm, _obj, continuation());

  // Go for barriers. Barriers can return straight to continuation, as long
  // as another barrier is not needed and we can reach the fastpath.
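  // When both barriers are requested, the keep-alive part is emitted with a
  // null continuation so it falls through into the LRB below; otherwise the
  // single barrier jumps back to the continuation itself.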
  if (_needs_keep_alive_barrier && _needs_load_ref_barrier) {
    keepalive(masm, nullptr);
    lrb(masm, continuation());
  } else if (_needs_keep_alive_barrier) {
    keepalive(masm, continuation());
  } else if (_needs_load_ref_barrier) {
    lrb(masm, continuation());
  } else {
    ShouldNotReachHere();
  }
}

void ShenandoahBarrierStubC2::maybe_far_jump_if_zero(MacroAssembler& masm, Register reg, Label* L_done) {
  Label L_short_jump;
  __ bnez(reg, L_short_jump);
  __ j(*L_done);
  __ bind(L_short_jump);
}

void ShenandoahBarrierStubC2::keepalive(MacroAssembler& masm, Label* L_done) {
  Address index(xthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(xthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label L_through, L_slowpath;

  Register tmp1 = t0;
  Register tmp2 = t1;
  assert_different_registers(tmp1, tmp2, _obj, _addr.base(), _addr.index());

  // If another barrier is enabled as well, do a runtime check for a specific barrier.
  if (_needs_load_ref_barrier) {
    Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(ShenandoahHeap::MARKING)));
    __ lbu(tmp1, gc_state_fast);
    if (L_done != nullptr) {
      maybe_far_jump_if_zero(masm, tmp1, L_done);
    } else {
      __ beqz(tmp1, L_through);
    }
  }

  // Fast-path: put object into buffer.
  // If buffer is already full, go slow.
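  // This mirrors the interpreter-time fast path in satb_barrier() above: step
  // the SATB index down one word and store the (uncompressed) oop at
  // buffer + index; an index of zero means the buffer is full.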
  __ ld(tmp1, index);
  __ beqz(tmp1, L_slowpath);
  __ subi(tmp1, tmp1, wordSize);
  __ sd(tmp1, index);
  __ ld(tmp2, buffer);

  // If object is narrow, we need to unpack it before inserting into buffer.
  __ add(tmp1, tmp1, tmp2);
  if (_narrow) {
    __ decode_heap_oop_not_null(tmp2, _obj);
    __ sd(tmp2, Address(tmp1));
  } else {
    __ sd(_obj, Address(tmp1));
  }

  // Fast-path exits here.
  if (L_done != nullptr) {
    __ j(*L_done);
  } else {
    __ j(L_through);
  }

  // Slow-path: call runtime to handle.
  __ bind(L_slowpath);

  // If this stub also supports LRB then we need to preserve _obj to use it there.
  if (_needs_load_ref_barrier) {
    preserve(_obj);
  } else {
    dont_preserve(_obj);
  }

  {
    SaveLiveRegisters slr(&masm, this);

    // Go to runtime and handle the rest there.
    __ mv(c_rarg0, _obj);
    __ rt_call(keepalive_runtime_entry_addr());
  }

  if (L_done != nullptr) {
    __ j(*L_done);
  } else {
    __ bind(L_through);
  }
}

void ShenandoahBarrierStubC2::lrb(MacroAssembler& masm, Label* L_done) {
  assert(L_done != nullptr, "Must be set");

  Label L_slow;

  Register tmp1 = t0;
  Register tmp2 = t1;
  assert_different_registers(tmp1, tmp2, _obj, _addr.base(), _addr.index());

  // If another barrier is enabled as well, do a runtime check for a specific barrier.
  if (_needs_keep_alive_barrier) {
    char state_to_check = ShenandoahHeap::HAS_FORWARDED | (_needs_load_ref_weak_barrier ? ShenandoahHeap::WEAK_ROOTS : 0);
    Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(state_to_check)));
    __ lbu(tmp1, gc_state_fast);
    maybe_far_jump_if_zero(masm, tmp1, L_done);
  }

  // If weak references are being processed, weak/phantom loads need to go slow,
  // regardless of their cset status.
  if (_needs_load_ref_weak_barrier) {
    Address gc_state_fast(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_array_offset(ShenandoahHeap::WEAK_ROOTS)));
    __ lbu(tmp1, gc_state_fast);
    __ bnez(tmp1, L_slow);
  }

  // Cset-check. Fall-through to slow if in collection set.
  if (_narrow) {
    __ decode_heap_oop_not_null(tmp2, _obj);
  } else {
    __ mv(tmp2, _obj);
  }

  __ mv(tmp1, ShenandoahHeap::in_cset_fast_test_addr());
  __ srli(tmp2, tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ add(tmp1, tmp1, tmp2);
  __ lbu(tmp1, Address(tmp1, 0));
  maybe_far_jump_if_zero(masm, tmp1, L_done);

  // Slow path
  __ bind(L_slow);

  // Obj is the result, need to temporarily stop preserving it.
  dont_preserve(_obj);
  {
    SaveLiveRegisters slr(&masm, this);

    // Shuffle in the arguments. The end result should be:
    //   c_rarg0 <- obj
    //   c_rarg1 <- lea(addr)
    if (c_rarg0 == _obj) {
      __ la(c_rarg1, _addr);
    } else if (c_rarg1 == _obj) {
      // Set up arguments in reverse, and then flip them
      __ la(c_rarg0, _addr);
      // flip them
      __ mv(t0, c_rarg0);
      __ mv(c_rarg0, c_rarg1);
      __ mv(c_rarg1, t0);
    } else {
      assert_different_registers(c_rarg1, _obj);
      __ la(c_rarg1, _addr);
      __ mv(c_rarg0, _obj);
    }

    // Go to runtime and handle the rest there.
    __ rt_call(lrb_runtime_entry_addr());

    // Save the result where needed.
    if (_narrow) {
      __ zext_w(_obj, x10);
    } else {
      __ mv(_obj, x10);
    }
  }
  preserve(_obj);

  __ j(*L_done);
}

#endif // COMPILER2