/*
 * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "opto/output.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ cbz(count, done);

      // Is GC active?
      Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ ldrb(rscratch1, gc_state);
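      // Dispatch on gc_state: MARKING means the SATB barrier must record the
      // oops about to be overwritten in dst; HAS_FORWARDED means the LRB must
      // heal stale from-space oops read from src. With an uninitialized
      // destination only the load-reference half applies.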
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
      } else {
        __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ tst(rscratch1, rscratch2);
        __ br(Assembler::EQ, done);
      }

      __ push(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop), src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop), src, dst, count);
      }
      __ pop(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register count, Register tmp) {
  if (ShenandoahCardBarrier && is_oop) {
    gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::satb_barrier(MacroAssembler* masm,
                                                 Register obj,
                                                 Register pre_val,
                                                 Register thread,
                                                 Register tmp1,
                                                 Register tmp2,
                                                 bool tosca_live,
                                                 bool expand_call) {
  assert(ShenandoahSATBBarrier, "Should be checked by caller");

  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp1, gc_state);
  __ tbz(tmp1, ShenandoahHeap::MARKING_BITPOS, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp1, index);           // tmp := *index_adr
  __ cbz(tmp1, runtime);         // tmp == 0?
                                 // If yes, goto runtime

  __ sub(tmp1, tmp1, wordSize);  // tmp := tmp - wordSize
  __ str(tmp1, index);           // *index_adr := tmp
  __ ldr(tmp2, buffer);
  __ add(tmp1, tmp1, tmp2);      // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp1, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
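  // tosca_live: r0 is the interpreter's top-of-stack cache and may hold a
  // live value across this call, so it must be preserved as well.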
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  }

  __ pop(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks if the lowest two bits are
  // set, and if so, clears the lowest two bits and copies the result
  // to dst. Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing if the lowest two bits == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying to dst

  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);
  __ ands(zr, tmp, markWord::lock_mask_in_place);
  __ br(Assembler::NE, done);
  __ orr(tmp, tmp, markWord::marked_value);
  __ eon(dst, tmp, zr);
  __ bind(done);

  if (borrow_reg) {
    __ pop(RegSet::of(tmp), sp);
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != rscratch2, "need rscratch2");
  assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;
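  // strip_ret_addr=true: with return-address protection (PAC-RET) the value
  // in LR may already carry a signature here; see MacroAssembler::enter.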
  __ enter(/*strip_ret_addr*/true);
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability
  if (is_strong) {
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
  } else {
    Label lrb;
    __ tbnz(rscratch2, ShenandoahHeap::WEAK_ROOTS_BITPOS, lrb);
    __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable);
    __ bind(lrb);
  }

  // use r1 for load address
  Register result_dst = dst;
  if (dst == r1) {
    __ mov(rscratch1, dst);
    dst = rscratch1;
  }

  // Save r0 and r1, unless one of them is the result register
  RegSet to_save = RegSet::of(r0, r1) - result_dst;
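  // Runtime calling convention for the LRB entries below: r0 takes the oop,
  // r1 takes the load address, and the healed oop comes back in r0.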
  __ push(to_save, sp);
  __ lea(r1, load_addr);
  __ mov(r0, dst);

  // Test for in-cset
  if (is_strong) {
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ tbz(rscratch2, 0, not_cset);
  }

  __ push_call_clobbered_registers();
  if (is_strong) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    // AOT saved adapters need relocation for this call.
    __ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
  }
  __ blr(lr);
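  // Stash the healed oop across the register restore: pop_call_clobbered_registers()
  // would otherwise overwrite r0.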
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ bind(not_cset);

  __ mov(result_dst, r0);
  __ pop(to_save, sp);

  __ bind(heap_stable);
  __ leave();
}

//
// Arguments:
//
// Inputs:
//   src: oop location to load from, might be clobbered
//
// Output:
//   dst: oop loaded from src location
//
// Kill:
//   rscratch1 (scratch reg)
//
// Alias:
//   dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp2) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
    return;
  }

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      dst = rscratch1;
    }
    assert_different_registers(dst, src.base(), src.index());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);

    load_reference_barrier(masm, dst, src, decorators);

    if (dst != result_dst) {
      __ mov(result_dst, dst);
      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter(/*strip_ret_addr*/true);
    __ push_call_clobbered_registers();
    satb_barrier(masm /* masm */,
                 noreg /* obj */,
                 dst /* pre_val */,
                 rthread /* thread */,
                 tmp1 /* tmp1 */,
                 tmp2 /* tmp2 */,
                 true /* tosca_live */,
                 true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::card_barrier(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

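  // Note: obj is clobbered below (it becomes the card index), so callers must
  // pass a disposable copy of the store address (store_at passes tmp3).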
  __ lsr(obj, obj, CardTable::card_shift());

  assert(CardTable::dirty_card_val() == 0, "must be");

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(rscratch1, curr_ct_holder_addr);

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ ldrb(rscratch2, Address(obj, rscratch1));
    __ cbz(rscratch2, L_already_dirty);
    __ strb(zr, Address(obj, rscratch1));
    __ bind(L_already_dirty);
  } else {
    __ strb(zr, Address(obj, rscratch1));
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  // 1: non-reference types require no barriers
  if (!is_reference_type(type)) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
    return;
  }

  // Flatten object address right away for simplicity: likely needed by barriers
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }

  bool storing_non_null = (val != noreg);

  // 2: pre-barrier: SATB needs the previous value
  if (ShenandoahBarrierSet::need_satb_barrier(decorators, type)) {
    satb_barrier(masm,
                 tmp3 /* obj */,
                 tmp2 /* pre_val */,
                 rthread /* thread */,
                 tmp1 /* tmp */,
                 rscratch1 /* tmp2 */,
                 storing_non_null /* tosca_live */,
                 false /* expand_call */);
  }

  // Store!
  BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);

  // 3: post-barrier: card barrier needs store address
  if (ShenandoahBarrierSet::need_card_barrier(decorators, type) && storing_non_null) {
    card_barrier(masm, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ cbz(obj, done);

  assert(obj != rscratch2, "need rscratch2");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lea(rscratch2, gc_state);
  __ ldrb(rscratch2, Address(rscratch2));

  // Check for heap in evacuation phase
  __ tbnz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, slowpath);

  __ bind(done);
}

#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::try_resolve_weak_handle_in_c2(MacroAssembler* masm, Register obj,
                                                                  Register tmp, Label& slow_path) {
  assert_different_registers(obj, tmp);

  Label done;

  // Resolve weak handle using the standard implementation.
  BarrierSetAssembler::try_resolve_weak_handle_in_c2(masm, obj, tmp, slow_path);

  // Check if the reference is null, and if it is, take the fast path.
  __ cbz(obj, done);

  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, __ legitimize_address(gc_state, 1, tmp));

  // Check if the heap is under weak-reference/roots processing, in
  // which case we need to take the slow path.
  __ tbnz(tmp, ShenandoahHeap::WEAK_ROOTS_BITPOS, slow_path);
  __ bind(done);
}
#endif

// Special Shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. The service is more complex than a
// traditional CAS operation because the CAS operation is intended to
// succeed if the reference at addr exactly matches expected or if the
// reference at addr holds a pointer to a from-space object that has
// been relocated to the location named by expected. There are two
// races that must be addressed:
//  a) A parallel thread may mutate the contents of addr so that it points
//     to a different object. In this case, the CAS operation should fail.
//  b) A parallel thread may heal the contents of addr, replacing a
//     from-space pointer held in addr with the to-space pointer
//     representing the new location of the object.
// Upon entry to cmpxchg_oop, it is assured that new_val equals null
// or it refers to an object that is not being evacuated out of
// from-space, or it refers to the to-space version of an object that
// is being evacuated out of from-space.
//
// By default the value held in the result register following execution
// of the generated code sequence is 0 to indicate failure of CAS,
// non-zero to indicate success. If is_cae, the result is the value most
// recently fetched from addr rather than a boolean success indicator.
//
// Clobbers rscratch1, rscratch2
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register addr,
                                                Register expected,
                                                Register new_val,
                                                bool acquire, bool release,
                                                bool is_cae,
                                                Register result) {
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
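  // Narrow oops are CASed as 32-bit words; steps 2 and 3 below decode before
  // resolving the forward pointer and re-encode for the retry.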

  assert_different_registers(addr, expected, tmp1, tmp2);
  assert_different_registers(addr, new_val, tmp1, tmp2);

  Label step4, done;

  // There are two ways to reach this label. Initial entry into the
  // cmpxchg_oop code expansion starts at step1 (which is equivalent
  // to label step4). Additionally, in the rare case that four steps
  // are required to perform the requested operation, the fourth step
  // is the same as the first. On a second pass through step 1,
  // control may flow through step 2 on its way to failure. It will
  // not flow from step 2 to step 3 since we are assured that the
  // memory at addr no longer holds a from-space pointer.
  //
  // The comments that immediately follow the step4 label apply only
  // to the case in which control reaches this label by branch from
  // step 3.

  __ bind(step4);

  // Step 4. CAS has failed because the value most recently fetched
  // from addr is no longer the from-space pointer held in tmp2. If a
  // different thread replaced the in-memory value with its equivalent
  // to-space pointer, then CAS may still be able to succeed. The
  // value held in the expected register has not changed.
  //
  // It is extremely rare we reach this point. For this reason, the
  // implementation opts for smaller rather than potentially faster
  // code. Ultimately, smaller code for this rare case most likely
  // delivers higher overall throughput by enabling improved icache
  // performance.

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.
  //
  // No label required for step 1.

  __ cmpxchg(addr, expected, new_val, size, acquire, release, false, tmp2);
  // EQ flag set iff success. tmp2 holds value fetched.

  // If expected equals null but tmp2 does not equal null, the
  // following branches to done to report failure of CAS. If both
  // expected and tmp2 equal null, the following branches to done to
  // report success of CAS. There's no need for a special test of
  // expected equal to null.

  __ br(Assembler::EQ, done);
  // if CAS failed, fall through to step 2

  // Step 2. CAS has failed because the value held at addr does not
  // match expected. This may be a false negative because the value fetched
  // from addr (now held in tmp2) may be a from-space pointer to the
  // original copy of same object referenced by to-space pointer expected.
  //
  // To resolve this, it suffices to find the forward pointer associated
  // with fetched value. If this matches expected, retry CAS with new
  // parameters. If this mismatches, then we have a legitimate
  // failure, and we're done.
  //
  // No need for step2 label.

  // overwrite tmp1 with from-space pointer fetched from memory
  __ mov(tmp1, tmp2);

  if (is_narrow) {
    // Decode tmp1 in order to resolve its forward pointer
    __ decode_heap_oop(tmp1, tmp1);
  }
  resolve_forward_pointer(masm, tmp1);
  // Encode tmp1 to compare against expected.
  __ encode_heap_oop(tmp1, tmp1);

  // Does forwarded value of fetched from-space pointer match original
  // value of expected? If tmp1 holds null, this comparison will fail
  // because we know from step1 that expected is not null. There is
  // no need for a separate test for tmp1 (the value originally held
  // in memory) equal to null.
  __ cmp(tmp1, expected);

  // If not, then the failure was legitimate and we're done.
  // Branching to done with NE condition denotes failure.
  __ br(Assembler::NE, done);

  // Fall through to step 3. No need for step3 label.

  // Step 3. We've confirmed that the value originally held in memory
  // (now held in tmp2) pointed to from-space version of original
  // expected value. Try the CAS again with the from-space expected
  // value. If it now succeeds, we're good.
  //
  // Note: tmp2 holds encoded from-space pointer that matches to-space
  // object residing at expected. tmp2 is the new "expected".

  // Note that macro implementation of __ cmpxchg cannot use same register
  // tmp2 for result and expected since it overwrites result before it
  // compares result with expected.
  __ cmpxchg(addr, tmp2, new_val, size, acquire, release, false, noreg);
  // EQ flag set iff success. tmp2 holds value fetched, tmp1 (rscratch1) clobbered.

  // If fetched value did not equal the new expected, this could
  // still be a false negative because some other thread may have
  // newly overwritten the memory value with its to-space equivalent.
  __ br(Assembler::NE, step4);

  if (is_cae) {
    // We're falling through to done to indicate success. Success
    // with is_cae is denoted by returning the value of expected as
    // result.
    __ mov(tmp2, expected);
  }

  __ bind(done);
  // At entry to done, the Z (EQ) flag is on iff the CAS
  // operation was successful. Additionally, if is_cae, tmp2 holds
  // the value most recently fetched from addr. In this case, success
  // is denoted by tmp2 matching expected.

  if (is_cae) {
    __ mov(result, tmp2);
  } else {
    __ cset(result, Assembler::EQ);
  }
}

#ifdef COMPILER2
void ShenandoahBarrierStubC2::gc_state_check_c2(MacroAssembler* masm, Register gcstate, const unsigned char test_state, ShenandoahBarrierStubC2* slow_stub) {
  if (ShenandoahGCStateCheckRemove) {
    // Unrealistic: remove all barrier fastpath checks.
  } else if (ShenandoahGCStateCheckHotpatch) {
    // In the ideal world, we would hot-patch the branch to slow stub with a single
    // (unconditional) jump or nop, based on our current GC state.
    __ nop();
  } else {
    int bit_to_check = ShenandoahThreadLocalData::gc_state_to_fast_bit(test_state);
    Address gc_state_fast(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_fast_offset()));
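    // gc_state_fast is a per-thread byte whose bit layout is given by
    // gc_state_to_fast_bit(), so the fastpath can test a single bit with
    // one tbz/tbnz.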
    __ ldrb(gcstate, gc_state_fast);
    if (slow_stub->_test_and_branch_reachable) {
      __ tbnz(gcstate, bit_to_check, *slow_stub->entry());
    } else {
      __ tbz(gcstate, bit_to_check, *slow_stub->continuation());
      __ b(*slow_stub->entry());
    }

    // This is where the slowpath stub will return to, or where the code
    // above jumps when the checks say no barrier work is needed.
    __ bind(*slow_stub->continuation());
  }
}

bool needs_acquiring_load_exclusive(const MachNode *n) {
  assert(n->is_CAS(true), "expecting a compare and swap");
  if (n->is_CAS(false)) {
    assert(n->has_trailing_membar(), "expected trailing membar");
  } else {
    return n->has_trailing_membar();
  }

  // The trailing membar was asserted above, so we can just return true here.
  return true;
}

void ShenandoahBarrierSetAssembler::compare_and_set_c2(const MachNode* node, MacroAssembler* masm, Register res, Register addr,
                                                       Register oldval, Register newval, Register tmp, bool exchange, bool maybe_null, bool narrow, bool weak) {
  bool acquire = needs_acquiring_load_exclusive(node);
  Assembler::operand_size op_size = narrow ? Assembler::word : Assembler::xword;

  // Pre-barrier covers several things:
  //  a. Avoids false negatives from CAS encountering stale from-space memory values.
  //  b. Satisfies the need for LRB for the CAE result.
  //  c. Records old value for the sake of SATB.
  //
  // (a) and (b) are covered because the load barrier does memory location fixup.
  // (c) is covered by KA on the current memory value.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, addr, narrow, /* do_load: */ true, __ offset());
    char check = 0;
    check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
    assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for CAS");
    ShenandoahBarrierStubC2::gc_state_check_c2(masm, rscratch1, check, stub);
  }

  // CAS!
  __ cmpxchg(addr, oldval, newval, op_size, acquire, /* release */ true, weak, exchange ? res : noreg);
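  // For CAE the fetched value must land directly in res; for a boolean CAS we
  // only need the flags, hence exchange ? res : noreg above.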

  // If we need a boolean result out of CAS, set the flag appropriately and promote the result.
  if (!exchange) {
    assert(res != noreg, "need result register");
    __ cset(res, Assembler::EQ);
  }

  // Post-barrier deals with card updates.
  card_barrier_c2(node, masm, Address(addr, 0));
}

void ShenandoahBarrierSetAssembler::get_and_set_c2(const MachNode* node, MacroAssembler* masm, Register preval,
                                                   Register newval, Register addr, Register tmp) {
  bool acquire = needs_acquiring_load_exclusive(node);
  bool narrow = node->bottom_type()->isa_narrowoop();

  // Pre-barrier covers several things:
  //  a. Satisfies the need for LRB for the GAS result.
  //  b. Records old value for the sake of SATB.
  //
  // (a) is covered because load barrier does memory location fixup.
  // (b) is covered by KA on the current memory value.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, addr, narrow, /* do_load: */ true, __ offset());
    char check = 0;
    check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
    assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node), "Not supported for GAS");
    ShenandoahBarrierStubC2::gc_state_check_c2(masm, rscratch1, check, stub);
  }

  if (narrow) {
    if (acquire) {
      __ atomic_xchgalw(preval, newval, addr);
    } else {
      __ atomic_xchgw(preval, newval, addr);
    }
  } else {
    if (acquire) {
      __ atomic_xchgal(preval, newval, addr);
    } else {
      __ atomic_xchg(preval, newval, addr);
    }
  }

  // Post-barrier deals with card updates.
  card_barrier_c2(node, masm, Address(addr, 0));
}

void ShenandoahBarrierSetAssembler::store_c2(const MachNode* node, MacroAssembler* masm, Address dst, bool dst_narrow,
                                             Register src, bool src_narrow, Register tmp) {

  // Pre-barrier: SATB, keep-alive the current memory value.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    assert(!ShenandoahBarrierStubC2::needs_load_ref_barrier(node), "Should not be required for stores");
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, tmp, dst, dst_narrow, /* do_load: */ true, __ offset());
    ShenandoahBarrierStubC2::gc_state_check_c2(masm, rscratch1, ShenandoahHeap::MARKING, stub);
  }

  // Do the actual store
  bool is_volatile = node->has_trailing_membar();
  if (dst_narrow) {
    if (!src_narrow) {
      // Need to encode into rscratch1, because we cannot clobber src.
      // TODO: Maybe there is a matcher way to test that src is unused after this?
      __ mov(rscratch1, src);
      if (ShenandoahBarrierStubC2::src_not_null(node)) {
        __ encode_heap_oop_not_null(rscratch1);
      } else {
        __ encode_heap_oop(rscratch1);
      }
      src = rscratch1;
    }

    if (is_volatile) {
      __ stlrw(src, dst.base());
    } else {
      __ strw(src, dst);
    }
  } else {
    if (is_volatile) {
      __ stlr(src, dst.base());
    } else {
      __ str(src, dst);
    }
  }

  // Post-barrier: card updates.
  card_barrier_c2(node, masm, dst);
}

void ShenandoahBarrierSetAssembler::load_c2(const MachNode* node, MacroAssembler* masm, Register dst, Address src) {
  bool acquire = node->memory_order() == MemNode::MemOrd::acquire;
  bool narrow = node->bottom_type()->isa_narrowoop();

  // Do the actual load. This load is the candidate for implicit null check, and MUST come first.
  if (narrow) {
    if (acquire) {
      __ ldarw(dst, src.base());
    } else {
      __ ldrw(dst, src);
    }
  } else {
    if (acquire) {
      __ ldar(dst, src.base());
    } else {
      __ ldr(dst, src);
    }
  }

  // Post-barrier: LRB / KA / weak-root processing.
  if (ShenandoahBarrierStubC2::needs_slow_barrier(node)) {
    ShenandoahBarrierStubC2* const stub = ShenandoahBarrierStubC2::create(node, dst, src, narrow, /* do_load: */ false, __ offset());
    char check = 0;
    check |= ShenandoahBarrierStubC2::needs_keep_alive_barrier(node) ? ShenandoahHeap::MARKING : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier(node) ? ShenandoahHeap::HAS_FORWARDED : 0;
    check |= ShenandoahBarrierStubC2::needs_load_ref_barrier_weak(node) ? ShenandoahHeap::WEAK_ROOTS : 0;
    ShenandoahBarrierStubC2::gc_state_check_c2(masm, rscratch1, check, stub);
  }
}

void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm, Address address) {
  if (ShenandoahSkipBarriers || (node->barrier_data() & ShenandoahBitCardMark) == 0) {
    return;
  }

  assert(CardTable::dirty_card_val() == 0, "must be");
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

  // rscratch2 = addr >> CardTable::card_shift()
  __ lea(rscratch2, address);
  __ lsr(rscratch2, rscratch2, CardTable::card_shift());

  // rscratch1 = card table base (holder)
  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(rscratch1, curr_ct_holder_addr);

  // rscratch2 = &card_table[card_index]
  __ add(rscratch2, rscratch1, rscratch2);

  if (UseCondCardMark) {
    Label L_already_dirty;
    __ ldrb(rscratch1, Address(rscratch2));
    __ cbz(rscratch1, L_already_dirty);
    __ strb(zr, Address(rscratch2));
    __ bind(L_already_dirty);
  } else {
    __ strb(zr, Address(rscratch2));
  }
}

#undef __
#define __ masm.

// Only handles forward branch jumps, target_offset >= branch_offset
// FIXME: copied verbatim from ZGC, duplicated code.
static bool aarch64_test_and_branch_reachable(int branch_offset, int target_offset) {
  assert(branch_offset >= 0, "branch to stub offsets must be positive");
  assert(target_offset >= 0, "offset in stubs section must be positive");
  assert(target_offset >= branch_offset, "forward branches only, branch_offset -> target_offset");

  const int test_and_branch_delta_limit = 32 * K;

  const int test_and_branch_to_trampoline_delta = target_offset - branch_offset;

  return test_and_branch_to_trampoline_delta < test_and_branch_delta_limit;
}

ShenandoahBarrierStubC2::ShenandoahBarrierStubC2(const MachNode* node, Register obj, Address addr, bool narrow, bool do_load, int offset) :
  BarrierStubC2(node),
  _obj(obj),
  _addr(addr),
  _do_load(do_load),
  _narrow(narrow),
  _maybe_null(!src_not_null(node)),
  _needs_load_ref_barrier(needs_load_ref_barrier(node)),
  _needs_load_ref_weak_barrier(needs_load_ref_barrier_weak(node)),
  _needs_keep_alive_barrier(needs_keep_alive_barrier(node)),
  _fastpath_branch_offset(offset),
  _test_and_branch_reachable(),
  _skip_trampoline(),
  _test_and_branch_reachable_entry() {

  assert(!ShenandoahSkipBarriers, "Do not touch stubs when disabled");
  assert(!_narrow || is_heap_access(node), "Only heap accesses can be narrow");

  // If we are in scratch emit mode we assume the worst case by leaving
  // _test_and_branch_reachable false.
  PhaseOutput* const output = Compile::current()->output();
  if (output->in_scratch_emit_size()) {
    return;
  }

  // Assume that each trampoline is one single instruction and that the stubs
  // will follow immediately after the _code section. Therefore, we are
  // checking if the distance between the fastpath branch and the
  // trampoline/entry of the current stub is less than 32K.
  const int code_size = output->buffer_sizing_data()->_code;
  const int trampoline_offset = trampoline_stubs_count() * NativeInstruction::instruction_size;
  _test_and_branch_reachable = aarch64_test_and_branch_reachable(_fastpath_branch_offset, code_size + trampoline_offset);
  if (_test_and_branch_reachable) {
    inc_trampoline_stubs_count();
  }
}

void ShenandoahBarrierStubC2::emit_code(MacroAssembler& masm) {
  // If we reach here with _skip_trampoline set it means that earlier we
  // emitted a trampoline to this stub and now we need to emit the actual stub.
  if (_skip_trampoline) {
    emit_code_actual(masm);
    return;
  }
  _skip_trampoline = true;

  // The fastpath already executes two branch instructions to reach this stub;
  // don't add a third one (a trampoline), just emit the stub body.
  if (!_test_and_branch_reachable) {
    // By registering the stub again, after setting _skip_trampoline, we'll
    // effectively cause the stub to be emitted the next time ::emit_code is
    // called.
    ShenandoahBarrierStubC2::register_stub(this);
    return;
  }

  // This is the entry point when coming from the fastpath, iff the fastpath
  // can reach it with a test-and-branch instruction; otherwise the entry is
  // ShenandoahBarrierStubC2::entry().
  const int target_offset = __ offset();
  __ bind(_test_and_branch_reachable_entry);

#ifdef ASSERT
  // Current assumption is that the barrier stubs are the first stubs emitted
  // after the actual code
  PhaseOutput* const output = Compile::current()->output();
  assert(stubs_start_offset() <= output->buffer_sizing_data()->_code, "stubs are assumed to be emitted directly after code and code_size is a hard limit on where it can start");
  assert(aarch64_test_and_branch_reachable(_fastpath_branch_offset, target_offset), "trampoline should be reachable");
#endif

  // Next fastpath branch's offset is unknown, but it's > current _fastpath_branch_offset
  const int next_branch_offset = _fastpath_branch_offset + NativeInstruction::instruction_size;

  // If emitting the current stub directly does not interfere with emission of
  // the next potential trampoline, then do it, to avoid executing an additional
  // branch when coming from the fastpath.
  if (aarch64_test_and_branch_reachable(next_branch_offset, target_offset + get_stub_size())) {
    emit_code_actual(masm);
  } else {
    __ b(*BarrierStubC2::entry());
    // By registering the stub again, after setting _skip_trampoline to true,
    // we'll effectively cause the stub to be emitted the next time
    // ::emit_code is called.
    ShenandoahBarrierStubC2::register_stub(this);
  }
}

Label* ShenandoahBarrierStubC2::entry() {
  if (_test_and_branch_reachable) {
    return &_test_and_branch_reachable_entry;
  }
  return BarrierStubC2::entry();
}

int ShenandoahBarrierStubC2::get_stub_size() {
  PhaseOutput* const output = Compile::current()->output();
  assert(!output->in_scratch_emit_size(), "only used when emitting stubs");
  BufferBlob* const blob = output->scratch_buffer_blob();
  CodeBuffer cb(blob->content_begin(), (address)output->scratch_locs_memory() - blob->content_begin());
  MacroAssembler masm(&cb);
  output->set_in_scratch_emit_size(true);
  emit_code_actual(masm);
  output->set_in_scratch_emit_size(false);
  return cb.insts_size();
}

void ShenandoahBarrierStubC2::emit_code_actual(MacroAssembler& masm) {
  assert(_needs_keep_alive_barrier || _needs_load_ref_barrier, "Why are you here?");

  // Stub entry
  if (!Compile::current()->output()->in_scratch_emit_size()) {
    __ bind(*BarrierStubC2::entry());
  }

  // If we need to load ourselves, do it here.
  if (_do_load) {
    // This does the load and the decode if necessary
    __ load_heap_oop(_obj, _addr, noreg, noreg, AS_RAW);
  } else if (_narrow) {
    // If object is narrow, we need to decode it first: barrier checks need full oops.
    if (_maybe_null) {
      __ decode_heap_oop(_obj);
    } else {
      __ decode_heap_oop_not_null(_obj);
    }
  }

  if (_do_load || _maybe_null) {
    __ cbz(_obj, *continuation());
  }

  keepalive(&masm, _obj, rscratch1, rscratch2);

  lrb(&masm, _obj, _addr, noreg);

  // If the object is narrow, we need to encode it before exiting.
  // For encoding, the value can only be null if we are dealing with weak loads;
  // otherwise, we have already null-checked. We can skip all of this if we
  // performed the load ourselves, which means the value is not used by the caller.
  if (_narrow && !_do_load) {
    if (_needs_load_ref_weak_barrier) {
      __ encode_heap_oop(_obj);
    } else {
      __ encode_heap_oop_not_null(_obj);
    }
  }

  // Go back to fast path
  __ b(*continuation());
}

#undef __
#define __ masm->

void ShenandoahBarrierStubC2::keepalive(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2) {
  Address index(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
  Label L_runtime;
  Label L_done;

  // The node doesn't need the keep-alive barrier at all; don't check anything else.
  if (!_needs_keep_alive_barrier) {
    return;
  }

  // If both LRB and KeepAlive barriers are required (rare), do a runtime check
  // for the enabled barrier.
  if (_needs_load_ref_barrier) {
    Address gcs_addr(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
    __ ldrb(rscratch1, gcs_addr);
    __ tbz(rscratch1, ShenandoahHeap::MARKING_BITPOS, L_done);
  }

  // If buffer is full, call into runtime.
  __ ldr(tmp1, index);
  __ cbz(tmp1, L_runtime);

  // The buffer is not full, store value into it.
  __ sub(tmp1, tmp1, wordSize);
  __ str(tmp1, index);
  __ ldr(tmp2, buffer);
  __ str(obj, Address(tmp2, tmp1));
  __ b(L_done);

  // Runtime call
  __ bind(L_runtime);

  preserve(obj);
  {
    SaveLiveRegisters save_registers(masm, this);
    __ mov(c_rarg0, obj);
    __ mov(tmp1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre));
    __ blr(tmp1);
  }

  __ bind(L_done);
}

void ShenandoahBarrierStubC2::lrb(MacroAssembler* masm, Register obj, Address addr, Register tmp) {
  Label L_done;

  // The node doesn't need the LRB at all; don't check anything else.
  if (!_needs_load_ref_barrier) {
    return;
  }

  if ((_node->barrier_data() & ShenandoahBitStrong) != 0) {
    // If both LRB and KeepAlive barriers are required (rare), do a runtime
    // check for enabled barrier.
    if (_needs_keep_alive_barrier) {
      Address gcs_addr(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ ldrb(rscratch1, gcs_addr);
      if (_needs_load_ref_weak_barrier) {
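        // Fold the WEAK_ROOTS bit down into bit 0 (HAS_FORWARDED_BITPOS), so
        // the single tbz below tests HAS_FORWARDED and WEAK_ROOTS at once.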
        __ orr(rscratch1, rscratch1, rscratch1, Assembler::LSR, ShenandoahHeap::WEAK_ROOTS_BITPOS);
      }
      __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, L_done);
    }

    // Weak/phantom loads always need to go to runtime. For strong refs we
    // check if the object is in the cset; if it is not, then we are done with LRB.
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, obj, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ cbz(rscratch2, L_done);
  }

  dont_preserve(obj);
  {
    SaveLiveRegisters save_registers(masm, this);

    // Shuffle in the arguments. The end result should be:
    //   c_rarg0 <-- obj
    //   c_rarg1 <-- lea(addr)
    if (c_rarg0 == obj) {
      __ lea(c_rarg1, addr);
    } else if (c_rarg1 == obj) {
      // Set up arguments in reverse, and then flip them
      __ lea(c_rarg0, addr);
      __ mov(rscratch1, c_rarg0);
      __ mov(c_rarg0, c_rarg1);
      __ mov(c_rarg1, rscratch1);
    } else {
      assert_different_registers(c_rarg1, obj);
      __ lea(c_rarg1, addr);
      __ mov(c_rarg0, obj);
    }

    // Get address of runtime LRB entry and call it
    __ mov(rscratch1, lrb_runtime_entry_addr());
    __ blr(rscratch1);

    // If we loaded the object in the stub it means we don't need to return it
    // to fastpath, so no need to make this mov.
    if (!_do_load) {
      __ mov(obj, r0);
    }
  }

  __ bind(L_done);
}

#endif // COMPILER2

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register scratch) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  Label L_loop, L_done;
  const Register end = count;

  // Zero count? Nothing to do.
  __ cbz(count, L_done);

  // end = start + count << LogBytesPerHeapOop
  // last element address to make inclusive
  __ lea(end, Address(start, count, Address::lsl(LogBytesPerHeapOop)));
  __ sub(end, end, BytesPerHeapOop);
  __ lsr(start, start, CardTable::card_shift());
  __ lsr(end, end, CardTable::card_shift());

  // count := highest card offset to dirty; the loop below dirties
  // cards start..end inclusive (offsets count down to 0)
  __ sub(count, end, start);

  Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ ldr(scratch, curr_ct_holder_addr);
  __ add(start, start, scratch);
  __ bind(L_loop);
  __ strb(zr, Address(start, count));
  __ subs(count, count, 1);
  __ br(Assembler::GE, L_loop);
  __ bind(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == r0, "result must arrive in r0");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object in cset.
    __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(tmp2, Address(tmp2, tmp1));
    __ cbz(tmp2, *stub->continuation());
  }

  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ far_call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }

  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ tbz(tmp, ShenandoahHeap::MARKING_BITPOS, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, r0);
  __ load_parameter(1, r1);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  if (is_strong) {
    if (is_native) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
    } else {
      if (UseCompressedOops) {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
      } else {
        __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
    } else {
      __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ epilogue();
}

#undef __

#endif // COMPILER1