/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahIUBarrier || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ cbz(count, done);

      // Is GC active?
      Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ ldrb(rscratch1, gc_state);
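      // If the destination is uninitialized there are no previous values to
      // enqueue, so the SATB pre-barrier part does not apply and only the
      // presence of forwarded objects forces the runtime call. Otherwise the
      // call is also needed while marking is in progress.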
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
      } else {
        __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ tst(rscratch1, rscratch2);
        __ br(Assembler::EQ, done);
      }

      __ push(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry), src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
      }
      __ pop(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp1,
                                                           Register tmp2,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp1, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp1, in_progress);
  }
  __ cbzw(tmp1, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
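  //
  // The SATB buffer fills from high addresses toward index 0: index holds the
  // byte offset of the next free slot, so index == 0 means the buffer is full
  // and must be handed to the runtime. Otherwise we claim a slot by
  // decrementing index by wordSize and storing pre_val at buffer + index.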

  __ ldr(tmp1, index);                      // tmp := *index_adr
  __ cbz(tmp1, runtime);                    // tmp == 0?
                                            // If yes, goto runtime

  __ sub(tmp1, tmp1, wordSize);             // tmp := tmp - wordSize
  __ str(tmp1, index);                      // *index_adr := tmp
  __ ldr(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);                 // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp1, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks if the lowest two bits are
  // set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing if the lowest two bits are == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
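  //
  // A worked example, with markWord::marked_value == 0b11 and a forwarded
  // object whose mark word holds fwd_ptr | 0b11 (fwd_ptr is at least
  // 8-byte aligned, so its lowest two bits are zero):
  //   ~mark            has its lowest two bits clear, so the 'ands' sets EQ
  //   ~mark | 0b11     sets those two bits again
  //   ~(~mark | 0b11)  == mark & ~0b11 == fwd_ptr, which is copied to dst
  // For a non-forwarded object at least one of the lowest two bits of ~mark
  // is set, the test yields NE, and dst is left untouched.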

  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);
  __ ands(zr, tmp, markWord::lock_mask_in_place);
  __ br(Assembler::NE, done);
  __ orr(tmp, tmp, markWord::marked_value);
  __ eon(dst, tmp, zr);
  __ bind(done);

  if (borrow_reg) {
    __ pop(RegSet::of(tmp), sp);
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != rscratch2, "need rscratch2");
  assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;
  __ enter(/*strip_ret_addr*/true);
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
  } else {
    Label lrb;
    __ tbnz(rscratch2, ShenandoahHeap::WEAK_ROOTS_BITPOS, lrb);
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
    __ bind(lrb);
  }
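  // Strong accesses need the barrier only while the heap has forwarded
  // objects. Weak and phantom accesses must also take the slow path while
  // concurrent weak root processing is in progress (WEAK_ROOTS set), since
  // the reference being loaded may be cleared or resolved concurrently.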

  // use r1 for load address
  Register result_dst = dst;
  if (dst == r1) {
    __ mov(rscratch1, dst);
    dst = rscratch1;
  }

  // Save r0 and r1, unless it is an output register
  RegSet to_save = RegSet::of(r0, r1) - result_dst;
  __ push(to_save, sp);
  __ lea(r1, load_addr);
  __ mov(r0, dst);

  // Test for in-cset
  if (is_strong) {
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ tbz(rscratch2, 0, not_cset);
  }
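  // The fast test above indexes a byte map with one entry per heap region
  // (region index = address >> region-size shift); a nonzero entry (bit 0
  // set) means the region is in the collection set and the slow path runs.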

  __ push_call_clobbered_registers();
  if (is_strong) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
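  // pop_call_clobbered_registers() would restore r0 and wipe out the call's
  // result, so stash the result in rscratch1 (which the push/pop pair leaves
  // alone) and move it back into r0 afterwards.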
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ bind(not_cset);

  __ mov(result_dst, r0);
  __ pop(to_save, sp);

  __ bind(heap_stable);
  __ leave();
}

void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahIUBarrier) {
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, rscratch1, true, false);
    __ pop_call_clobbered_registers();
  }
}

//
// Arguments:
//
// Inputs:
//   src: oop location to load from, might be clobbered
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   rscratch1 (scratch reg)
//
// Alias:
//   dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      dst = rscratch1;
    }
    assert_different_registers(dst, src.base(), src.index());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mov(result_dst, dst);
      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
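  // This covers loads through weak/phantom references (e.g. Reference.get()):
  // the referent just loaded is recorded via the SATB pre-barrier so that
  // concurrent marking treats it as live, even though the reference object
  // itself may not be visited again.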
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter(/*strip_ret_addr*/true);
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp1 */,
                           tmp2 /* tmp2 */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

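  // The pre-barrier below reloads the previous value from the destination,
  // so it needs the full destination address materialized in one register
  // (tmp3); the same flattened address is then reused for the store itself.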
  shenandoah_write_barrier_pre(masm,
                               tmp3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
  } else {
    iu_barrier(masm, val, tmp1);
    // G1 barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ cbz(obj, done);

  assert(obj != rscratch2, "need rscratch2");
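  // jni_env lives at a fixed offset inside JavaThread, so the thread's
  // gc_state byte can be addressed relative to jni_env:
  //   jni_env + (gc_state_offset - jni_environment_offset)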
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lea(rscratch2, gc_state);
  __ ldrb(rscratch2, Address(rscratch2));

  // Check for heap in evacuation phase
  __ tbnz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected. There are two
// races that must be addressed:
//  a) A parallel thread may mutate the contents of addr so that it points
//     to a different object. In this case, the CAS operation should fail.
//  b) A parallel thread may heal the contents of addr, replacing a
//     from-space pointer held in addr with the to-space pointer
//     representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success. If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers rscratch1, rscratch2
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                bool acquire, bool release,
                                                bool is_cae,
                                                Register result) {
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;

  assert_different_registers(addr, expected, tmp1, tmp2);
  assert_different_registers(addr, new_val, tmp1, tmp2);

  Label step4, done;

  // There are two ways to reach this label. Initial entry into the
  // cmpxchg_oop code expansion starts at step1 (which is equivalent
  // to label step4). Additionally, in the rare case that four steps
  // are required to perform the requested operation, the fourth step
  // is the same as the first. On a second pass through step 1,
  // control may flow through step 2 on its way to failure. It will
  // not flow from step 2 to step 3 since we are assured that the
  // memory at addr no longer holds a from-space pointer.
  //
  // The comments that immediately follow the step4 label apply only
  // to the case in which control reaches this label by branch from
  // step 3.

  __ bind(step4);

  // Step 4. CAS has failed because the value most recently fetched
  // from addr is no longer the from-space pointer held in tmp2. If a
  // different thread replaced the in-memory value with its equivalent
  // to-space pointer, then CAS may still be able to succeed. The
  // value held in the expected register has not changed.
  //
  // It is extremely rare we reach this point. For this reason, the
  // implementation opts for smaller rather than potentially faster
  // code. Ultimately, smaller code for this rare case most likely
  // delivers higher overall throughput by enabling improved icache
  // performance.

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.
  //
  // No label required for step 1.

  __ cmpxchg(addr, expected, new_val, size, acquire, release, false, tmp2);
  // EQ flag set iff success. tmp2 holds value fetched.

  // If expected equals null but tmp2 does not equal null, the
  // following branches to done to report failure of CAS. If both
  // expected and tmp2 equal null, the following branches to done to
  // report success of CAS. There's no need for a special test of
  // expected equal to null.

  __ br(Assembler::EQ, done);
  // if CAS failed, fall through to step 2

  // Step 2. CAS has failed because the value held at addr does not
  // match expected. This may be a false negative because the value fetched
  // from addr (now held in tmp2) may be a from-space pointer to the
  // original copy of the same object referenced by to-space pointer expected.
  //
  // To resolve this, it suffices to find the forward pointer associated
  // with the fetched value. If this matches expected, retry CAS with new
  // parameters. If this mismatches, then we have a legitimate
  // failure, and we're done.
  //
  // No need for step2 label.

  // overwrite tmp1 with from-space pointer fetched from memory
  __ mov(tmp1, tmp2);

  if (is_narrow) {
    // Decode tmp1 in order to resolve its forward pointer
    __ decode_heap_oop(tmp1, tmp1);
    resolve_forward_pointer(masm, tmp1);
    // Re-encode tmp1 to compare against the (narrow) expected value.
    __ encode_heap_oop(tmp1, tmp1);
  } else {
    resolve_forward_pointer(masm, tmp1);
  }

  // Does the forwarded value of the fetched from-space pointer match the
  // original value of expected? If tmp1 holds null, this comparison will
  // fail because we know from step1 that expected is not null. There is
  // no need for a separate test for tmp1 (the value originally held
  // in memory) equal to null.
  __ cmp(tmp1, expected);

  // If not, then the failure was legitimate and we're done.
  // Branching to done with NE condition denotes failure.
  __ br(Assembler::NE, done);

  // Fall through to step 3. No need for step3 label.

  // Step 3. We've confirmed that the value originally held in memory
  // (now held in tmp2) pointed to the from-space version of the original
  // expected value. Try the CAS again with the from-space expected
  // value. If it now succeeds, we're good.
  //
  // Note: tmp2 holds encoded from-space pointer that matches to-space
  // object residing at expected. tmp2 is the new "expected".

  // Note that macro implementation of __cmpxchg cannot use same register
  // tmp2 for result and expected since it overwrites result before it
  // compares result with expected.
  __ cmpxchg(addr, tmp2, new_val, size, acquire, release, false, noreg);
  // EQ flag set iff success. tmp2 holds value fetched, tmp1 (rscratch1) clobbered.

  // If the fetched value did not equal the new expected, this could
  // still be a false negative because some other thread may have
  // newly overwritten the memory value with its to-space equivalent.
  __ br(Assembler::NE, step4);

  if (is_cae) {
    // We're falling through to done to indicate success. Success
    // with is_cae is denoted by returning the value of expected as
    // result.
    __ mov(tmp2, expected);
  }

  __ bind(done);
  // At entry to done, the Z (EQ) flag is on iff the CAS
  // operation was successful. Additionally, if is_cae, tmp2 holds
  // the value most recently fetched from addr. In this case, success
  // is denoted by tmp2 matching expected.

  if (is_cae) {
    __ mov(result, tmp2);
  } else {
    __ cset(result, Assembler::EQ);
  }
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
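  // Pass pre_val to the runtime stub through the C1 stub-parameter area of
  // the frame; generate_c1_pre_barrier_runtime_stub() reads it back with
  // load_parameter(0, ...).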
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == r0, "result must arrive in r0");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(tmp2, Address(tmp2, tmp1));
    __ cbz(tmp2, *stub->continuation());
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ tbz(tmp, ShenandoahHeap::MARKING_BITPOS, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved
  // arg1 : load address

  __ push_call_clobbered_registers();
  __ load_parameter(0, r0);
  __ load_parameter(1, r1);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  if (is_strong) {
    if (is_native) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    } else {
      if (UseCompressedOops) {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
      } else {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ epilogue();
}

#undef __

#endif // COMPILER1