/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

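// Rough pseudo-C sketch of the check emitted by arraycopy_prologue below
// (illustrative only; field and function names are informal):
//
//   if (is_oop && count != 0) {
//     state  = thread->gc_state;
//     needed = dest_uninitialized ? HAS_FORWARDED              // SATB part skipped
//                                 : HAS_FORWARDED | MARKING;
//     if ((state & needed) != 0) {
//       arraycopy_barrier(src, dst, count);                    // leaf runtime call
//     }
//   }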
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling the runtime if count == 0
      __ cbz(count, done);

      // Is GC active?
      Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ ldrb(rscratch1, gc_state);
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
      } else {
        __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ tst(rscratch1, rscratch2);
        __ br(Assembler::EQ, done);
      }

      __ push(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry), src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
      }
      __ pop(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register count, Register tmp, RegSet saved_regs) {
  if (ShenandoahCardBarrier && is_oop) {
    gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp, saved_regs);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, rscratch1, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp1,
                                                           Register tmp2,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly, skipping the _last_sp check generated by
  // InterpreterMacroAssembler::call_VM_leaf_base.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

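  // The fast path below mirrors the SATB queue's enqueue protocol; roughly,
  // in pseudo-C (informal field names, for illustration only):
  //
  //   if (!queue.active) return;
  //   pre_val = *obj;                        // only when obj was supplied
  //   if (pre_val == nullptr) return;
  //   if (queue.index == 0) goto runtime;    // buffer full
  //   queue.index -= wordSize;               // index counts bytes, downward
  //   *(queue.buffer + queue.index) = pre_val;
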
  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp1, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp1, in_progress);
  }
  __ cbzw(tmp1, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp1, index);           // tmp := *index_adr
  __ cbz(tmp1, runtime);         // tmp == 0? If yes, goto runtime

  __ sub(tmp1, tmp1, wordSize);  // tmp := tmp - wordSize
  __ str(tmp1, index);           // *index_adr := tmp
  __ ldr(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);      // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp1, 0));
  __ b(done);

  __ bind(runtime);
  // Save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(rfp + frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks whether the lowest two bits
  // are set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing whether the lowest two bits == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst
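  // Equivalent C sketch of the trick (mark is the loaded mark word; the
  // low two bits being set marks a forwarded object):
  //
  //   uintptr_t t = ~mark;
  //   if ((t & lock_mask_in_place) == 0) {  // low two bits of mark both set
  //     dst = ~(t | marked_value);          // == mark & ~lock_mask: the forwardee
  //   }                                     // otherwise dst is left untouched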

  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);
  __ ands(zr, tmp, markWord::lock_mask_in_place);
  __ br(Assembler::NE, done);
  __ orr(tmp, tmp, markWord::marked_value);
  __ eon(dst, tmp, zr);
  __ bind(done);

  if (borrow_reg) {
    __ pop(RegSet::of(tmp), sp);
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != rscratch2, "need rscratch2");
  assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;
  __ enter(/*strip_ret_addr*/true);
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
  } else {
    Label lrb;
    __ tbnz(rscratch2, ShenandoahHeap::WEAK_ROOTS_BITPOS, lrb);
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
    __ bind(lrb);
  }

  // Use r1 for the load address
  Register result_dst = dst;
  if (dst == r1) {
    __ mov(rscratch1, dst);
    dst = rscratch1;
  }

  // Save r0 and r1, unless one of them is the result register
  RegSet to_save = RegSet::of(r0, r1) - result_dst;
  __ push(to_save, sp);
  __ lea(r1, load_addr);
  __ mov(r0, dst);

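  // The in-cset test below indexes a byte map by heap region; roughly:
  //
  //   in_cset = cset_fast_test_addr[obj >> region_size_bytes_shift] & 1
  //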
  // Test for in-cset
  if (is_strong) {
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ tbz(rscratch2, 0, not_cset);
  }

  __ push_call_clobbered_registers();
  if (is_strong) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ bind(not_cset);

  __ mov(result_dst, r0);
  __ pop(to_save, sp);

  __ bind(heap_stable);
  __ leave();
}

//
// Arguments:
//
// Inputs:
//   src: oop location to load from, might be clobbered
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   rscratch1 (scratch reg)
//
// Alias:
//   dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      dst = rscratch1;
    }
    assert_different_registers(dst, src.base(), src.index());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mov(result_dst, dst);
      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter(/*strip_ret_addr*/true);
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp1 */,
                           tmp2 /* tmp2 */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  __ lsr(obj, obj, CardTable::card_shift());

  assert(CardTable::dirty_card_val() == 0, "must be");
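  // With dirty_card_val() == 0, the mark below is simply a zero-byte store.
  // Pseudo-C sketch (informal names):
  //   idx = obj >> card_shift;  base = thread->card_table;
  //   if (!UseCondCardMark || base[idx] != 0) base[idx] = 0;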

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(rscratch1, curr_ct_holder_addr);

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ ldrb(rscratch2, Address(obj, rscratch1));
    __ cbz(rscratch2, L_already_dirty);
    __ strb(zr, Address(obj, rscratch1));
    __ bind(L_already_dirty);
  } else {
    __ strb(zr, Address(obj, rscratch1));
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

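  // A reference store expands to: SATB pre-barrier (enqueue the prior value
  // while marking), the store itself, then an optional card mark. Roughly:
  //
  //   satb_enqueue(*addr);                       // pre-barrier, when marking
  //   *addr = val;
  //   if (ShenandoahCardBarrier) dirty_card(addr);
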
  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               tmp3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
  } else {
    // Barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
    if (ShenandoahCardBarrier) {
      store_check(masm, tmp3);
    }
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ cbz(obj, done);

  assert(obj != rscratch2, "need rscratch2");
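  // jni_env points at JavaThread::_jni_environment, so subtracting
  // jni_environment_offset() rebases the address to the enclosing JavaThread,
  // from which the thread-local gc_state byte is read.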
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lea(rscratch2, gc_state);
  __ ldrb(rscratch2, Address(rscratch2));

  // Check for heap in evacuation phase
  __ tbnz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, slowpath);

  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected. There are two
// races that must be addressed:
//  a) A parallel thread may mutate the contents of addr so that it points
//     to a different object. In this case, the CAS operation should fail.
//  b) A parallel thread may heal the contents of addr, replacing a
//     from-space pointer held in addr with the to-space pointer
//     representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success. If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers rscratch1, rscratch2
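//
// In pseudo-C, the four-step sequence emitted below is approximately
// (sketch only; "CAS" returns the value previously held at addr):
//
//   step1: old = CAS(addr, expected, new_val);
//          if (old == expected) goto done;      // plain success
//   step2: fwd = resolve_forward_pointer(old);
//          if (fwd != expected) goto done;      // legitimate failure
//   step3: old2 = CAS(addr, old, new_val);      // retry with from-space expected
//          if (old2 != old) goto step1;         // step4: addr was healed; retry
//          goto done;                           // success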
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                bool acquire, bool release,
                                                bool is_cae,
                                                Register result) {
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;

  assert_different_registers(addr, expected, tmp1, tmp2);
  assert_different_registers(addr, new_val, tmp1, tmp2);

  Label step4, done;

  // There are two ways to reach this label. Initial entry into the
  // cmpxchg_oop code expansion starts at step1 (which is equivalent
  // to label step4). Additionally, in the rare case that four steps
  // are required to perform the requested operation, the fourth step
  // is the same as the first. On a second pass through step 1,
  // control may flow through step 2 on its way to failure. It will
  // not flow from step 2 to step 3 since we are assured that the
  // memory at addr no longer holds a from-space pointer.
  //
  // The comments that immediately follow the step4 label apply only
  // to the case in which control reaches this label by branch from
  // step 3.

  __ bind(step4);

  // Step 4. CAS has failed because the value most recently fetched
  // from addr is no longer the from-space pointer held in tmp2. If a
  // different thread replaced the in-memory value with its equivalent
  // to-space pointer, then CAS may still be able to succeed. The
  // value held in the expected register has not changed.
  //
  // It is extremely rare that we reach this point. For this reason, the
  // implementation opts for smaller rather than potentially faster
  // code. Ultimately, smaller code for this rare case most likely
  // delivers higher overall throughput by enabling improved icache
  // performance.

  // Step 1. Fast-path.
  //
  // Try to CAS with the given arguments. If successful, then we are done.
  //
  // No label required for step 1.

  __ cmpxchg(addr, expected, new_val, size, acquire, release, false, tmp2);
  // EQ flag set iff success. tmp2 holds value fetched.

  // If expected equals null but tmp2 does not equal null, the
  // following branches to done to report failure of CAS. If both
  // expected and tmp2 equal null, the following branches to done to
  // report success of CAS. There's no need for a special test of
  // expected equal to null.

  __ br(Assembler::EQ, done);
  // If CAS failed, fall through to step 2.

  // Step 2. CAS has failed because the value held at addr does not
  // match expected. This may be a false negative because the value fetched
  // from addr (now held in tmp2) may be a from-space pointer to the
  // original copy of the same object referenced by the to-space pointer expected.
  //
  // To resolve this, it suffices to find the forward pointer associated
  // with the fetched value. If this matches expected, retry CAS with new
  // parameters. If this mismatches, then we have a legitimate
  // failure, and we're done.
  //
  // No need for a step2 label.

  // Overwrite tmp1 with the from-space pointer fetched from memory
  __ mov(tmp1, tmp2);

  if (is_narrow) {
    // Decode tmp1 in order to resolve its forward pointer
    __ decode_heap_oop(tmp1, tmp1);
  }
  resolve_forward_pointer(masm, tmp1);
  // Encode tmp1 to compare against expected.
  __ encode_heap_oop(tmp1, tmp1);

  // Does the forwarded value of the fetched from-space pointer match the
  // original value of expected? If tmp1 holds null, this comparison will fail
  // because we know from step1 that expected is not null. There is
  // no need for a separate test for tmp1 (the value originally held
  // in memory) equal to null.
  __ cmp(tmp1, expected);

  // If not, then the failure was legitimate and we're done.
  // Branching to done with the NE condition denotes failure.
  __ br(Assembler::NE, done);

  // Fall through to step 3. No need for a step3 label.

  // Step 3. We've confirmed that the value originally held in memory
  // (now held in tmp2) pointed to the from-space version of the original
  // expected value. Try the CAS again with the from-space expected
  // value. If it now succeeds, we're good.
  //
  // Note: tmp2 holds the encoded from-space pointer that matches the to-space
  // object residing at expected. tmp2 is the new "expected".

  // Note that the macro implementation of __cmpxchg cannot use the same register
  // tmp2 for result and expected since it overwrites result before it
  // compares result with expected.
  __ cmpxchg(addr, tmp2, new_val, size, acquire, release, false, noreg);
  // EQ flag set iff success. tmp2 holds value fetched, tmp1 (rscratch1) clobbered.

  // If the fetched value did not equal the new expected, this could
  // still be a false negative because some other thread may have
  // newly overwritten the memory value with its to-space equivalent.
  __ br(Assembler::NE, step4);

  if (is_cae) {
    // We're falling through to done to indicate success. Success
    // with is_cae is denoted by returning the value of expected as
    // the result.
    __ mov(tmp2, expected);
  }

  __ bind(done);
  // At entry to done, the Z (EQ) flag is on iff the CAS
  // operation was successful. Additionally, if is_cae, tmp2 holds
  // the value most recently fetched from addr. In this case, success
  // is denoted by tmp2 matching expected.

  if (is_cae) {
    __ mov(result, tmp2);
  } else {
    __ cset(result, Assembler::EQ);
  }
}

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register scratch, RegSet saved_regs) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");
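  // Pseudo-C sketch of the card-dirtying loop emitted below
  // (dirty_card_val() == 0, so dirtying is a zero-byte store):
  //
  //   last  = start + count * BytesPerHeapOop - BytesPerHeapOop;
  //   first = start >> card_shift;  last >>= card_shift;
  //   base  = thread->card_table;
  //   for (i = last - first; i >= 0; i--)
  //     base[first + i] = 0;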

  Label L_loop, L_done;
  const Register end = count;

  // Zero count? Nothing to do.
  __ cbz(count, L_done);

  // end = start + count << LogBytesPerHeapOop
  // Make end the address of the last element, so the card range is inclusive.
  __ lea(end, Address(start, count, Address::lsl(LogBytesPerHeapOop)));
  __ sub(end, end, BytesPerHeapOop);
  __ lsr(start, start, CardTable::card_shift());
  __ lsr(end, end, CardTable::card_shift());

  // Number of cards minus one, used as a descending byte index into the table
  __ sub(count, end, start);

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(scratch, curr_ct_holder_addr);
  __ add(start, start, scratch);
  __ bind(L_loop);
  __ strb(zr, Address(start, count));
  __ subs(count, count, 1);
  __ br(Assembler::GE, L_loop);
  __ bind(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj  = stub->obj()->as_register();
  Register res  = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == r0, "result must arrive in r0");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(tmp2, Address(tmp2, tmp1));
    __ cbz(tmp2, *stub->continuation());
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
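  // Same SATB enqueue protocol as satb_write_barrier_pre above: try the
  // thread-local buffer first, and fall back to the runtime once the
  // buffer is full (index == 0).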

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ tbz(tmp, ShenandoahHeap::MARKING_BITPOS, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, r0);
  __ load_parameter(1, r1);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  if (is_strong) {
    if (is_native) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    } else {
      if (UseCompressedOops) {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
      } else {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ epilogue();
}

#undef __

#endif // COMPILER1