/*
 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ beqz(count, done);

      // Is GC active?
      Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      assert_different_registers(src, dst, count, t0);

      __ lbu(t0, gc_state);
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ test_bit(t0, t0, ShenandoahHeap::HAS_FORWARDED_BITPOS);
        __ beqz(t0, done);
      } else {
        __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ beqz(t0, done);
      }

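      // Call the matching arraycopy barrier for narrow (compressed) or wide
      // oops, preserving the registers the caller asked us to save.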
      __ push_reg(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
      }
      __ pop_reg(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register count, Register tmp, RegSet saved_regs) {
  if (ShenandoahCardBarrier && is_oop) {
    gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp, saved_regs);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp1,
                                                           Register tmp2,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
  assert(thread == xthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lbu(t1, gc_state);
  __ test_bit(t1, t1, ShenandoahHeap::MARKING_BITPOS);
  __ beqz(t1, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ beqz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
  __ ld(tmp1, index);            // tmp := *index_adr
  __ beqz(tmp1, runtime);        // tmp == 0? If yes, goto runtime

  __ sub(tmp1, tmp1, wordSize);  // tmp := tmp - wordSize
  __ sd(tmp1, index);            // *index_adr := tmp
  __ ld(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);      // tmp := tmp + *buffer_adr

  // Record the previous value
  __ sd(pre_val, Address(tmp1, 0));
  __ j(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(x10);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push_reg(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) is null.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop_reg(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");

  Label is_null;
  __ beqz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even t0 and t1, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks if the lowest two bits are
  // set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing whether the lowest two bits == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
  RegSet saved_regs = RegSet::of(t2);
  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = t0;
    if (tmp == dst) {
      tmp = t1;
    }
    saved_regs += RegSet::of(tmp);
  }

  assert_different_registers(tmp, dst, t2);
  __ push_reg(saved_regs, sp);

  Label done;
  __ ld(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ xori(tmp, tmp, -1);  // XOR with -1 is a bitwise NOT (inverts the mark word)
  __ andi(t2, tmp, markWord::lock_mask_in_place);
  __ bnez(t2, done);
  __ ori(tmp, tmp, markWord::marked_value);
  __ xori(dst, tmp, -1);  // invert back
  __ bind(done);

  __ pop_reg(saved_regs, sp);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
                                                           Register dst,
                                                           Address load_addr,
                                                           DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != t1 && load_addr.base() != t1, "need t1");
  assert_different_registers(load_addr.base(), t0, t1);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;
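  // Set up a frame; the slow path below may call into the runtime.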
  __ enter();
  Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lbu(t1, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ test_bit(t1, t1, ShenandoahHeap::HAS_FORWARDED_BITPOS);
    __ beqz(t1, heap_stable);
  } else {
    Label lrb;
    __ test_bit(t0, t1, ShenandoahHeap::WEAK_ROOTS_BITPOS);
    __ bnez(t0, lrb);
    __ test_bit(t0, t1, ShenandoahHeap::HAS_FORWARDED_BITPOS);
    __ beqz(t0, heap_stable);
    __ bind(lrb);
  }

  // Use x11 for the load address
  Register result_dst = dst;
  if (dst == x11) {
    __ mv(t1, dst);
    dst = t1;
  }

  // Save x10 and x11, unless one of them is the output register
  RegSet saved_regs = RegSet::of(x10, x11) - result_dst;
  __ push_reg(saved_regs, sp);
  __ la(x11, load_addr);
  __ mv(x10, dst);

  // Test for in-cset
  if (is_strong) {
    __ mv(t1, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(t0, x10, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(t1, t1, t0);
    __ lbu(t1, Address(t1));
    __ test_bit(t0, t1, 0);
    __ beqz(t0, not_cset);
  }
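  // Slow path: x10 holds the oop and x11 the load address, matching the
  // calling convention of the runtime entries below.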

  __ push_call_clobbered_registers();
  address target = nullptr;
  if (is_strong) {
    if (is_narrow) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    }
  } else if (is_weak) {
    if (is_narrow) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ call(target);
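  // Stash the result out of x10 while the call-clobbered registers are
  // restored; t0 is not covered by push/pop_call_clobbered_registers.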
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);
  __ bind(not_cset);
  __ mv(result_dst, x10);
  __ pop_reg(saved_regs, sp);

  __ bind(heap_stable);
  __ leave();
}

//
// Arguments:
//
// Inputs:
//   src: oop location to load from, might be clobbered
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   x28, x29 (tmp regs)
//
// Alias:
//   dst: x28 or x29 (one of them may be used as a temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
                                            DecoratorSet decorators,
                                            BasicType type,
                                            Register dst,
                                            Address src,
                                            Register tmp1,
                                            Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    RegSet saved_regs;
    if (dst == src.base()) {
      dst = (src.base() == x28) ? x29 : x28;
      saved_regs = RegSet::of(dst);
      __ push_reg(saved_regs, sp);
    }
    assert_different_registers(dst, src.base());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mv(result_dst, dst);
      dst = result_dst;
    }

    if (saved_regs.bits() != 0) {
      __ pop_reg(saved_regs, sp);
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
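    // The SATB pre-barrier below may call into the runtime, so build a
    // frame and preserve all call-clobbered registers around it.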
    __ enter();
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           xthread /* thread */,
                           tmp1 /* tmp1 */,
                           tmp2 /* tmp2 */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");

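  // Convert the object address into a card table index; note that this
  // clobbers obj.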
  __ srli(obj, obj, CardTable::card_shift());

  assert(CardTable::dirty_card_val() == 0, "must be");

  Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ld(t1, curr_ct_holder_addr);
  __ add(t1, obj, t1);

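  // With conditional card marking, check the card first and skip the store
  // if it is already dirty (dirty_card_val() == 0), avoiding redundant
  // writes to the card table.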
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ lbu(t0, Address(t1));
    __ beqz(t0, L_already_dirty);
    __ sb(zr, Address(t1));
    __ bind(L_already_dirty);
  } else {
    __ sb(zr, Address(t1));
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // flatten object address if needed
  if (dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mv(tmp3, dst.base());
    }
  } else {
    __ la(tmp3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               tmp3 /* obj */,
                               tmp2 /* pre_val */,
                               xthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

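  // Store through the flattened address in tmp3; the address is fully
  // computed, so no temporaries are needed for the plain store.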
  BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
  if (needs_post_barrier) {
    store_check(masm, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ beqz(obj, done);

  assert(obj != t1, "need t1");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lbu(t1, gc_state);

  // Check whether the heap is in the evacuation phase
  __ test_bit(t0, t1, ShenandoahHeap::EVACUATION_BITPOS);
  __ bnez(t0, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation.  The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected.  There are two
// races that must be addressed:
//   a) A parallel thread may mutate the contents of addr so that it points
//      to a different object.  In this case, the CAS operation should fail.
//   b) A parallel thread may heal the contents of addr, replacing a
//      from-space pointer held in addr with the to-space pointer
//      representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success.  If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers t0, t1
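//
// Roughly, the generated sequence is (a pseudocode sketch; the code below
// also encodes/decodes compressed oops where needed):
//
//   retry:
//     fetched = CAS(addr, expected, new_val)
//     if (fetched == expected) goto success
//     forwarded = resolve_forwardee(fetched)
//     if (forwarded != expected) goto fail
//     if (CAS(addr, fetched, new_val) != fetched) goto retry
//   success:
//     result = is_cae ? expected : 1; goto done
//   fail:
//     result = is_cae ? forwarded : 0
//   done: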
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                Assembler::Aqrl acquire,
                                                Assembler::Aqrl release,
                                                bool is_cae,
                                                Register result) {
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::uint32 : Assembler::int64;

  assert_different_registers(addr, expected, t0, t1);
  assert_different_registers(addr, new_val, t0, t1);

  Label retry, success, fail, done;

  __ bind(retry);

  // Step 1: Try to CAS.
  __ cmpxchg(addr, expected, new_val, size, acquire, release, /* result */ t1);

  // If success, then we are done.
  __ beq(expected, t1, success);

  // Step 2: CAS failed, check the forwarded pointer.
  __ mv(t0, t1);

  if (is_narrow) {
    __ decode_heap_oop(t0, t0);
  }
  resolve_forward_pointer(masm, t0);
  // Re-encode only for compressed oops; with wide oops t0 already holds the
  // raw pointer to compare against expected.
  if (is_narrow) {
    __ encode_heap_oop(t0, t0);
  }

  // Report failure when the forwarded oop was not expected.
  __ bne(t0, expected, fail);

  // Step 3: CAS again using the forwarded oop.
  __ cmpxchg(addr, t1, new_val, size, acquire, release, /* result */ t0);

  // Retry when failed.
  __ bne(t0, t1, retry);

  __ bind(success);
  if (is_cae) {
    __ mv(result, expected);
  } else {
    __ mv(result, 1);
  }
  __ j(done);

  __ bind(fail);
  if (is_cae) {
    __ mv(result, t0);
  } else {
    __ mv(result, zr);
  }

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register tmp, RegSet saved_regs) {
  assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");

  Label L_loop, L_done;
  const Register end = count;

  // Zero count? Nothing to do.
  __ beqz(count, L_done);

  // end = start + count << LogBytesPerHeapOop
  // last element address to make inclusive
  __ shadd(end, count, start, tmp, LogBytesPerHeapOop);
  __ sub(end, end, BytesPerHeapOop);
  __ srli(start, start, CardTable::card_shift());
  __ srli(end, end, CardTable::card_shift());

  // number of cards to dirty, minus one (the loop below includes index zero)
  __ sub(count, end, start);

  Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ld(tmp, curr_ct_holder_addr);
  __ add(start, start, tmp);

  __ bind(L_loop);
  __ add(tmp, start, count);
  __ sb(zr, Address(tmp));
  __ sub(count, count, 1);
  __ bgez(count, L_loop);
  __ bind(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */);
  }
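  // A null previous value needs no SATB record.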
  __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ j(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce,
                                                                    ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj  = stub->obj()->as_register();
  Register res  = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == x10, "result must arrive in x10");
  assert_different_registers(tmp1, tmp2, t0);

  if (res != obj) {
    __ mv(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mv(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ srli(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ add(tmp2, tmp2, tmp1);
    __ lbu(tmp2, Address(tmp2));
    __ beqz(tmp2, *stub->continuation(), true /* is_far */);
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);

  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ j(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = x10;
  const Register thread = xthread;
  const Register tmp = t0;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ lb(tmp, gc_state);
  __ test_bit(tmp, tmp, ShenandoahHeap::MARKING_BITPOS);
  __ beqz(tmp, done);

  // Can we store original value in the thread's buffer?
  __ ld(tmp, queue_index);
  __ beqz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ sd(tmp, queue_index);
  __ ld(t1, buffer);
  __ add(tmp, tmp, t1);
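  // Record the pre-value in the buffer slot just claimed.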
  __ load_parameter(0, t1);
  __ sd(t1, Address(tmp, 0));
  __ j(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm,
                                                                                    DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved
  // arg1 : address the object was loaded from

  __ push_call_clobbered_registers();
  __ load_parameter(0, x10);
  __ load_parameter(1, x11);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  address target  = nullptr;
  if (is_strong) {
    if (is_native) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    } else {
      if (UseCompressedOops) {
        target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else {
        target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  __ call(target);
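  // Preserve the result in t0 (not covered by pop_call_clobbered_registers)
  // while the clobbered registers are restored.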
  __ mv(t0, x10);
  __ pop_call_clobbered_registers();
  __ mv(x10, t0);

  __ epilogue();
}

#undef __

#endif // COMPILER1