/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ cbz(count, done);

      // Is GC active?
      Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ ldrb(rscratch1, gc_state);
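      // If the destination is uninitialized there are no previous values to
      // enqueue, so the SATB part of the barrier is unnecessary and only
      // evacuation (HAS_FORWARDED) needs to be checked; otherwise concurrent
      // marking (MARKING) matters as well.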
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
      } else {
        __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ tst(rscratch1, rscratch2);
        __ br(Assembler::EQ, done);
      }

      __ push(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop), src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop), src, dst, count);
      }
      __ pop(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register count, Register tmp, RegSet saved_regs) {
  if (ShenandoahCardBarrier && is_oop) {
    gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp, saved_regs);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp1,
                                                           Register tmp2,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp1, gc_state);
  __ tbz(tmp1, ShenandoahHeap::MARKING_BITPOS, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
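  //
  // A C-like sketch of the fast path that follows (index counts down in
  // bytes from the end of the buffer):
  //   if (index == 0) goto runtime;       // buffer is full
  //   index -= wordSize;
  //   *(buffer + index) = pre_val;        // enqueue previous value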

  __ ldr(tmp1, index);                      // tmp := *index_adr
  __ cbz(tmp1, runtime);                    // tmp == 0?
                                            // If yes, goto runtime

  __ sub(tmp1, tmp1, wordSize);             // tmp := tmp - wordSize
  __ str(tmp1, index);                      // *index_adr := tmp
  __ ldr(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);                 // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp1, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(rfp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  }

  __ pop(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks whether the lowest two bits
  // are set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing whether the lowest two bits == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
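  //
  // A C-like sketch (illustrative only; the actual bit values come from
  // markWord):
  //   uintptr_t mark = *(uintptr_t*)(dst + mark_offset);
  //   if ((mark & lock_mask) == marked_value)  // 0b11 tag: object forwarded
  //     dst = mark & ~lock_mask;               // strip tag -> forwardee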

  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);
  __ ands(zr, tmp, markWord::lock_mask_in_place);
  __ br(Assembler::NE, done);
  __ orr(tmp, tmp, markWord::marked_value);
  __ eon(dst, tmp, zr);
  __ bind(done);

  if (borrow_reg) {
    __ pop(RegSet::of(tmp), sp);
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != rscratch2, "need rscratch2");
  assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;
  __ enter(/*strip_ret_addr*/true);
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
  } else {
    Label lrb;
    __ tbnz(rscratch2, ShenandoahHeap::WEAK_ROOTS_BITPOS, lrb);
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
    __ bind(lrb);
  }
  // The slow path expects the load address in r1; if dst is r1, move it
  // aside into rscratch1 so that r1 stays free for the address.
  Register result_dst = dst;
  if (dst == r1) {
    __ mov(rscratch1, dst);
    dst = rscratch1;
  }

  // Save r0 and r1, unless it is an output register
  RegSet to_save = RegSet::of(r0, r1) - result_dst;
  __ push(to_save, sp);
  __ lea(r1, load_addr);
  __ mov(r0, dst);

  // Test for in-cset
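  // (The collection-set map is a byte array with one byte per heap region;
  // the candidate oop's region index is its address shifted right by the
  // region-size shift, and a non-zero byte means the region is in the cset.)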
  if (is_strong) {
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ tbz(rscratch2, 0, not_cset);
  }

  __ push_call_clobbered_registers();
  if (is_strong) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    // AOT saved adapters need relocation for this call.
    __ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ bind(not_cset);

  __ mov(result_dst, r0);
  __ pop(to_save, sp);

  __ bind(heap_stable);
  __ leave();
}

//
// Arguments:
//
// Inputs:
//   src:        oop location to load from, might be clobbered
//
// Output:
//   dst:        oop loaded from src location
//
// Kill:
//   rscratch1 (scratch reg)
//
// Alias:
//   dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      dst = rscratch1;
    }
    assert_different_registers(dst, src.base(), src.index());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mov(result_dst, dst);
      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
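  // (The keep-alive barrier enqueues the just-loaded referent into the SATB
  // queue, so that e.g. an intrinsified Reference.get() cannot hide an
  // object from concurrent marking.)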
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter(/*strip_ret_addr*/true);
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp1 */,
                           tmp2 /* tmp2 */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");
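  // A C-like sketch of the card mark emitted below (the current card-table
  // base is cached in a thread-local slot):
  //   cards[obj >> card_shift] = dirty_card_val();   // dirty_card_val() == 0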

  __ lsr(obj, obj, CardTable::card_shift());

  assert(CardTable::dirty_card_val() == 0, "must be");

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(rscratch1, curr_ct_holder_addr);

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ ldrb(rscratch2, Address(obj, rscratch1));
    __ cbz(rscratch2, L_already_dirty);
    __ strb(zr, Address(obj, rscratch1));
    __ bind(L_already_dirty);
  } else {
    __ strb(zr, Address(obj, rscratch1));
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               tmp3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
  if (needs_post_barrier) {
    store_check(masm, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ cbz(obj, done);

  assert(obj != rscratch2, "need rscratch2");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lea(rscratch2, gc_state);
  __ ldrb(rscratch2, Address(rscratch2));

  // Check for heap in evacuation phase
  __ tbnz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation.  The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected.  There are two
// races that must be addressed:
//  a) A parallel thread may mutate the contents of addr so that it points
//     to a different object.  In this case, the CAS operation should fail.
//  b) A parallel thread may heal the contents of addr, replacing a
//     from-space pointer held in addr with the to-space pointer
//     representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success.  If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers rscratch1, rscratch2
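//
// A rough sketch of the retry protocol implemented below, in pseudocode
// (not the emitted instruction stream):
//   step1: old = CAS(addr, expected, new_val);  if (old == expected) goto done;
//   step2: if (forwardee(old) != expected)      goto done;  // genuine failure
//   step3: old2 = CAS(addr, old, new_val);      if (old2 == old) goto done;
//   step4: goto step1;  // addr was healed concurrently; try once more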
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                bool acquire, bool release,
                                                bool is_cae,
                                                Register result) {
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;

  assert_different_registers(addr, expected, tmp1, tmp2);
  assert_different_registers(addr, new_val, tmp1, tmp2);

  Label step4, done;

  // There are two ways to reach this label.  Initial entry into the
  // cmpxchg_oop code expansion starts at step1 (which is equivalent
  // to label step4).  Additionally, in the rare case that four steps
  // are required to perform the requested operation, the fourth step
  // is the same as the first.  On a second pass through step 1,
  // control may flow through step 2 on its way to failure.  It will
  // not flow from step 2 to step 3 since we are assured that the
  // memory at addr no longer holds a from-space pointer.
  //
  // The comments that immediately follow the step4 label apply only
  // to the case in which control reaches this label by branch from
  // step 3.

  __ bind (step4);

  // Step 4. CAS has failed because the value most recently fetched
  // from addr is no longer the from-space pointer held in tmp2.  If a
  // different thread replaced the in-memory value with its equivalent
  // to-space pointer, then CAS may still be able to succeed.  The
  // value held in the expected register has not changed.
  //
  // It is extremely rare we reach this point.  For this reason, the
  // implementation opts for smaller rather than potentially faster
  // code.  Ultimately, smaller code for this rare case most likely
  // delivers higher overall throughput by enabling improved icache
  // performance.

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments.  If successful, then we are done.
  //
  // No label required for step 1.

  __ cmpxchg(addr, expected, new_val, size, acquire, release, false, tmp2);
  // EQ flag set iff success.  tmp2 holds value fetched.

  // If expected equals null but tmp2 does not equal null, the
  // following branches to done to report failure of CAS.  If both
  // expected and tmp2 equal null, the following branches to done to
  // report success of CAS.  There's no need for a special test of
  // expected equal to null.

  __ br(Assembler::EQ, done);
  // if CAS failed, fall through to step 2

  // Step 2. CAS has failed because the value held at addr does not
  // match expected.  This may be a false negative because the value fetched
  // from addr (now held in tmp2) may be a from-space pointer to the
  // original copy of the same object referenced by the to-space pointer expected.
  //
  // To resolve this, it suffices to find the forward pointer associated
  // with the fetched value.  If this matches expected, retry CAS with new
  // parameters.  If it does not match, then we have a legitimate
  // failure, and we're done.
  //
  // No need for step2 label.

  // overwrite tmp1 with from-space pointer fetched from memory
  __ mov(tmp1, tmp2);

  if (is_narrow) {
    // Decode tmp1 in order to resolve its forward pointer
    __ decode_heap_oop(tmp1, tmp1);
  }
  resolve_forward_pointer(masm, tmp1);
  // Encode tmp1 to compare against expected.
  __ encode_heap_oop(tmp1, tmp1);

  // Does forwarded value of fetched from-space pointer match original
  // value of expected?  If tmp1 holds null, this comparison will fail
  // because we know from step1 that expected is not null.  There is
  // no need for a separate test for tmp1 (the value originally held
  // in memory) equal to null.
  __ cmp(tmp1, expected);

  // If not, then the failure was legitimate and we're done.
  // Branching to done with NE condition denotes failure.
  __ br(Assembler::NE, done);

  // Fall through to step 3.  No need for step3 label.

  // Step 3. We've confirmed that the value originally held in memory
  // (now held in tmp2) pointed to from-space version of original
  // expected value.  Try the CAS again with the from-space expected
  // value.  If it now succeeds, we're good.
  //
  // Note: tmp2 holds encoded from-space pointer that matches to-space
  // object residing at expected.  tmp2 is the new "expected".

  // Note that the macro implementation of cmpxchg cannot use the same
  // register tmp2 for result and expected since it overwrites result
  // before it compares result with expected.
  __ cmpxchg(addr, tmp2, new_val, size, acquire, release, false, noreg);
  // EQ flag set iff success.  tmp2 holds value fetched, tmp1 (rscratch1) clobbered.

  // If fetched value did not equal the new expected, this could
  // still be a false negative because some other thread may have
  // newly overwritten the memory value with its to-space equivalent.
  __ br(Assembler::NE, step4);

  if (is_cae) {
    // We're falling through to done to indicate success.  Success
    // with is_cae is denoted by returning the value of expected as
    // result.
    __ mov(tmp2, expected);
  }

  __ bind(done);
  // At entry to done, the Z (EQ) flag is on iff the CAS
  // operation was successful.  Additionally, if is_cae, tmp2 holds
  // the value most recently fetched from addr.  In this case, success
  // is denoted by tmp2 matching expected.

  if (is_cae) {
    __ mov(result, tmp2);
  } else {
    __ cset(result, Assembler::EQ);
  }
}

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register scratch, RegSet saved_regs) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");
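  // Dirty every card intersected by the written range
  // [start, start + count * BytesPerHeapOop).  A C-like sketch of the loop
  // below (the current card-table base is cached in a thread-local slot):
  //   for (size_t c = first_card; c <= last_card; c++) cards[c] = dirty;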

  Label L_loop, L_done;
  const Register end = count;

  // Zero count? Nothing to do.
  __ cbz(count, L_done);

  // end = start + (count << LogBytesPerHeapOop), stepped back by one
  // element so that end is the address of the last element, inclusive
  __ lea(end, Address(start, count, Address::lsl(LogBytesPerHeapOop)));
  __ sub(end, end, BytesPerHeapOop);
  __ lsr(start, start, CardTable::card_shift());
  __ lsr(end, end, CardTable::card_shift());

  // distance in card-table bytes between the last and first card
  // (the loop below dirties both endpoints)
  __ sub(count, end, start);

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(scratch, curr_ct_holder_addr);
  __ add(start, start, scratch);
  __ bind(L_loop);
  __ strb(zr, Address(start, count));
  __ subs(count, count, 1);
  __ br(Assembler::GE, L_loop);
  __ bind(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == r0, "result must arrive in r0");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(tmp2, Address(tmp2, tmp1));
    __ cbz(tmp2, *stub->continuation());
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ tbz(tmp, ShenandoahHeap::MARKING_BITPOS, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, r0);
  __ load_parameter(1, r1);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  if (is_strong) {
    if (is_native) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    } else {
      if (UseCompressedOops) {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
      } else {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ epilogue();
}

#undef __

#endif // COMPILER1