/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2012, 2022 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "interpreter/interpreter.hpp"
#include "macroAssembler_ppc.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

void ShenandoahBarrierSetAssembler::satb_write_barrier(MacroAssembler *masm,
                                                       Register base, RegisterOrConstant ind_or_offs,
                                                       Register tmp1, Register tmp2, Register tmp3,
                                                       MacroAssembler::PreservationLevel preservation_level) {
  if (ShenandoahSATBBarrier) {
    __ block_comment("satb_write_barrier (shenandoahgc) {");
    satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
    __ block_comment("} satb_write_barrier (shenandoahgc)");
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler *masm, DecoratorSet decorators,
                                                           Register base, RegisterOrConstant ind_or_offs,
                                                           Register dst,
                                                           Register tmp1, Register tmp2,
                                                           MacroAssembler::PreservationLevel preservation_level) {
  if (ShenandoahLoadRefBarrier) {
    __ block_comment("load_reference_barrier (shenandoahgc) {");
    load_reference_barrier_impl(masm, decorators, base, ind_or_offs, dst, tmp1, tmp2, preservation_level);
    __ block_comment("} load_reference_barrier (shenandoahgc)");
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count,
                                                       Register preserve1, Register preserve2) {
  Register R11_tmp = R11_scratch1;

  assert_different_registers(src, dst, count, R11_tmp, noreg);
  if (preserve1 != noreg) {
    // Not technically required, but overlapping 'preserve' registers most likely indicate an error.
    assert_different_registers(preserve1, preserve2);
  }

  /* ==== Check whether barrier is required (optimizations) ==== */
  // Fast path: Component type of array is not a reference type.
  if (!is_reference_type(type)) {
    return;
  }

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  // Fast path: No barrier required if every barrier type is either disabled or would not store
  // any useful information.
  if ((!ShenandoahSATBBarrier || dest_uninitialized) && !ShenandoahLoadRefBarrier) {
    return;
  }

  __ block_comment("arraycopy_prologue (shenandoahgc) {");
  Label skip_prologue;

  // Fast path: Array is of length zero.
  __ cmpdi(CCR0, count, 0);
  __ beq(CCR0, skip_prologue);

  /* ==== Check whether barrier is required (gc state) ==== */
  __ lbz(R11_tmp, in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
         R16_thread);

  // The set of garbage collection states requiring barriers depends on the available barrier types and the
  // type of the reference in question.
  // For instance, SATB barriers may be skipped if it is certain that the overwritten values are not relevant
  // to the garbage collector.
  const int required_states = ShenandoahSATBBarrier && dest_uninitialized
                              ? ShenandoahHeap::HAS_FORWARDED
                              : ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
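  // (Illustrative: for an IS_DEST_UNINITIALIZED copy, the destination holds no previous values
  //  that concurrent marking could lose, so only HAS_FORWARDED needs to trigger the slow path.)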

  __ andi_(R11_tmp, R11_tmp, required_states);
  __ beq(CCR0, skip_prologue);

  /* ==== Invoke runtime ==== */
  // Save to-be-preserved registers.
  int highest_preserve_register_index = 0;
  {
    if (preserve1 != noreg && preserve1->is_volatile()) {
      __ std(preserve1, -BytesPerWord * ++highest_preserve_register_index, R1_SP);
    }
    if (preserve2 != noreg && preserve2 != preserve1 && preserve2->is_volatile()) {
      __ std(preserve2, -BytesPerWord * ++highest_preserve_register_index, R1_SP);
    }

    __ std(src, -BytesPerWord * ++highest_preserve_register_index, R1_SP);
    __ std(dst, -BytesPerWord * ++highest_preserve_register_index, R1_SP);
    __ std(count, -BytesPerWord * ++highest_preserve_register_index, R1_SP);

    __ save_LR_CR(R11_tmp);
    // The pushed frame must cover the register spill slots written above, hence the positive size.
    __ push_frame_reg_args(BytesPerWord * highest_preserve_register_index,
                           R11_tmp);
  }

  // Invoke runtime.
  address jrt_address = nullptr;
  if (UseCompressedOops) {
    jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry);
  } else {
    jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry);
  }
  assert(jrt_address != nullptr, "jrt routine cannot be found");

  __ call_VM_leaf(jrt_address, src, dst, count);

  // Restore to-be-preserved registers.
  {
    __ pop_frame();
    __ restore_LR_CR(R11_tmp);

    __ ld(count, -BytesPerWord * highest_preserve_register_index--, R1_SP);
    __ ld(dst, -BytesPerWord * highest_preserve_register_index--, R1_SP);
    __ ld(src, -BytesPerWord * highest_preserve_register_index--, R1_SP);

    if (preserve2 != noreg && preserve2 != preserve1 && preserve2->is_volatile()) {
      __ ld(preserve2, -BytesPerWord * highest_preserve_register_index--, R1_SP);
    }
    if (preserve1 != noreg && preserve1->is_volatile()) {
      __ ld(preserve1, -BytesPerWord * highest_preserve_register_index--, R1_SP);
    }
  }

  __ bind(skip_prologue);
  __ block_comment("} arraycopy_prologue (shenandoahgc)");
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register dst, Register count,
                                                       Register preserve) {
  if (ShenandoahCardBarrier && is_reference_type(type)) {
    __ block_comment("arraycopy_epilogue (shenandoahgc) {");
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, preserve);
    __ block_comment("} arraycopy_epilogue (shenandoahgc)");
  }
}

// The to-be-enqueued value can either be determined
// - dynamically by passing the reference's address information (load mode) or
// - statically by passing a register the value is stored in (preloaded mode),
//   - for performance optimizations in cases where the previous value is known (currently not implemented) and
//   - for incremental-update barriers.
//
// decorators:  The previous value's decorator set.
//              In "load mode", the value must equal '0'.
// base:        Base register of the reference's address (load mode).
//              In "preloaded mode", the register must equal 'noreg'.
// ind_or_offs: Index or offset of the reference's address (load mode).
//              If 'base' equals 'noreg' (preloaded mode), the passed value is ignored.
// pre_val:     Register holding the to-be-enqueued value (preloaded mode).
//              In "load mode", this register acts as a temporary register and must
//              thus not be 'noreg'. In "preloaded mode", its contents are preserved.
// tmp1/tmp2:   Temporary registers, one of which must be non-volatile in "preloaded mode".
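//
// Illustrative call shapes, taken from the call sites in this file:
//   load mode:      satb_write_barrier_impl(masm, 0, base, ind_or_offs, tmp1, tmp2, tmp3, level);
//   preloaded mode: satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, level);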
void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm, DecoratorSet decorators,
                                                            Register base, RegisterOrConstant ind_or_offs,
                                                            Register pre_val,
                                                            Register tmp1, Register tmp2,
                                                            MacroAssembler::PreservationLevel preservation_level) {
  assert_different_registers(tmp1, tmp2, pre_val, noreg);

  Label skip_barrier;

  /* ==== Determine necessary runtime invocation preservation measures ==== */
  const bool needs_frame           = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
  const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
  const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;

  // Check whether marking is active.
  __ lbz(tmp1, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);

  __ andi_(tmp1, tmp1, ShenandoahHeap::MARKING);
  __ beq(CCR0, skip_barrier);

  /* ==== Determine the reference's previous value ==== */
  bool preloaded_mode = base == noreg;
  Register pre_val_save = noreg;

  if (preloaded_mode) {
    // The previous value has already been passed in, so it does not need to be loaded here.
    // In case 'pre_val' is a volatile register, it must be saved across the C-call
    // as callers may depend on its value.
    // Unless the general-purpose registers are saved anyway, one of the temporary registers
    // (i.e., 'tmp1' or 'tmp2') is used to preserve 'pre_val'.
    if (!preserve_gp_registers && pre_val->is_volatile()) {
      pre_val_save = !tmp1->is_volatile() ? tmp1 : tmp2;
      assert(!pre_val_save->is_volatile(), "at least one of the temporary registers must be non-volatile");
    }

    if ((decorators & IS_NOT_NULL) != 0) {
#ifdef ASSERT
      __ cmpdi(CCR0, pre_val, 0);
      __ asm_assert_ne("null oop is not allowed");
#endif // ASSERT
    } else {
      __ cmpdi(CCR0, pre_val, 0);
      __ beq(CCR0, skip_barrier);
    }
  } else {
    // Load from the reference address to determine the reference's current value (before the store is performed).
    // Unlike the value passed in "preloaded mode", it does not need to be preserved.
    assert(decorators == 0, "decorator set must be empty");
    assert(base != noreg, "base must be a register");
    assert(!ind_or_offs.is_register() || ind_or_offs.as_register() != noreg, "ind_or_offs must be a register");
    if (UseCompressedOops) {
      __ lwz(pre_val, ind_or_offs, base);
    } else {
      __ ld(pre_val, ind_or_offs, base);
    }

    __ cmpdi(CCR0, pre_val, 0);
    __ beq(CCR0, skip_barrier);

    if (UseCompressedOops) {
      __ decode_heap_oop_not_null(pre_val);
    }
  }

  /* ==== Try to enqueue the previous value directly into the thread's local SATB mark queue ==== */
  {
    Label runtime;
    Register Rbuffer = tmp1, Rindex = tmp2;

    // Check whether the queue has enough capacity to store another oop.
    // If not, jump to the runtime to commit the buffer and to allocate a new one.
    // (The buffer's index corresponds to the amount of remaining free space.)
    __ ld(Rindex, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
    __ cmpdi(CCR0, Rindex, 0);
    __ beq(CCR0, runtime); // If index == 0 (buffer is full), goto runtime.

    // Capacity suffices. Decrement the queue's size by the size of one oop.
    // (The buffer is filled in the direction opposite to the heap's growth, i.e., it is filled downwards.)
    __ addi(Rindex, Rindex, -wordSize);
    __ std(Rindex, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);

    // Enqueue the previous value and skip the invocation of the runtime.
    __ ld(Rbuffer, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()), R16_thread);
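    // The slot just freed by the decrement above lives at 'Rbuffer + Rindex'.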
    __ stdx(pre_val, Rbuffer, Rindex);
    __ b(skip_barrier);

    __ bind(runtime);
  }

  /* ==== Invoke runtime to commit SATB mark queue to gc and allocate a new buffer ==== */
  // Save to-be-preserved registers.
  int nbytes_save = 0;

  if (needs_frame) {
    if (preserve_gp_registers) {
      nbytes_save = (preserve_fp_registers
                     ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
                     : MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
      __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers);
    }

    __ save_LR_CR(tmp1);
    __ push_frame_reg_args(nbytes_save, tmp2);
  }

  if (!preserve_gp_registers && preloaded_mode && pre_val->is_volatile()) {
    assert(pre_val_save != noreg, "pre_val_save must not be noreg");

    // The 'pre_val' register must be saved manually unless general-purpose registers are preserved in general.
    __ mr(pre_val_save, pre_val);
  }

  // Invoke runtime.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, R16_thread);

  // Restore to-be-preserved registers.
  if (!preserve_gp_registers && preloaded_mode && pre_val->is_volatile()) {
    __ mr(pre_val, pre_val_save);
  }

  if (needs_frame) {
    __ pop_frame();
    __ restore_LR_CR(tmp1);

    if (preserve_gp_registers) {
      __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers);
    }
  }

  __ bind(skip_barrier);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler *masm, Register dst, Register tmp) {
  __ block_comment("resolve_forward_pointer_not_null (shenandoahgc) {");

  Register tmp1 = tmp,
           R0_tmp2 = R0;
  assert_different_registers(dst, tmp1, R0_tmp2, noreg);

  // If the object has been evacuated, the mark word layout is as follows:
  // | forwarding pointer (62-bit) | '11' (2-bit) |

  // The invariant that heap object pointers have the lowest two bits cleared permits retrieving
  // the forwarding pointer solely by inverting the lowest two bits.
  // This invariant follows inevitably from hotspot's minimal object alignment.
  assert(markWord::marked_value <= (unsigned long) MinObjAlignmentInBytes,
         "marked value must not be higher than hotspot's minimal alignment");

  Label done;

  // Load the object's mark word.
  __ ld(tmp1, oopDesc::mark_offset_in_bytes(), dst);

  // Load the bit mask for the lock bits.
  __ li(R0_tmp2, markWord::lock_mask_in_place);

  // Check whether all bits matching the bit mask are set.
  // If that is the case, the object has been evacuated and the most significant bits form the forwarding pointer.
  __ andc_(R0_tmp2, R0_tmp2, tmp1);

  assert(markWord::lock_mask_in_place == markWord::marked_value,
         "marked value must equal the value obtained when all lock bits are set");
  if (VM_Version::has_isel()) {
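    // Branch-free select: compute the decoded forwardee into 'tmp1' (mark ^ lock_mask) and
    // keep it only if all lock bits were set (CCR0 'equal' from the 'andc_' above).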
    __ xori(tmp1, tmp1, markWord::lock_mask_in_place);
    __ isel(dst, CCR0, Assembler::equal, false, tmp1);
  } else {
    __ bne(CCR0, done);
    __ xori(dst, tmp1, markWord::lock_mask_in_place);
  }

  __ bind(done);
  __ block_comment("} resolve_forward_pointer_not_null (shenandoahgc)");
}

// base:        Base register of the reference's address.
// ind_or_offs: Index or offset of the reference's address (load mode).
// dst:         Reference's address. In case the object has been evacuated, this is the to-space version
//              of that object.
void ShenandoahBarrierSetAssembler::load_reference_barrier_impl(
    MacroAssembler *masm, DecoratorSet decorators,
    Register base, RegisterOrConstant ind_or_offs,
    Register dst,
    Register tmp1, Register tmp2,
    MacroAssembler::PreservationLevel preservation_level) {
  if (ind_or_offs.is_register()) {
    assert_different_registers(tmp1, tmp2, base, ind_or_offs.as_register(), dst, noreg);
  } else {
    assert_different_registers(tmp1, tmp2, base, dst, noreg);
  }

  Label skip_barrier;

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  /* ==== Check whether heap is stable ==== */
  __ lbz(tmp2, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);

  if (is_strong) {
    // For strong references, the heap is considered stable if "has forwarded" is not active.
    __ andi_(tmp1, tmp2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION);
    __ beq(CCR0, skip_barrier);
#ifdef ASSERT
    // "evacuation" -> (implies) "has forwarded". If we reach this code, "has forwarded" must thus be set.
    __ andi_(tmp1, tmp1, ShenandoahHeap::HAS_FORWARDED);
    __ asm_assert_ne("'has forwarded' is missing");
#endif // ASSERT
  } else {
    // For all non-strong references, the heap is considered stable if none of "has forwarded",
    // "root set processing", and "weak reference processing" is active.
    // The additional phase conditions are in place to avoid the resurrection of weak references (see JDK-8266440).
    Label skip_fastpath;
    __ andi_(tmp1, tmp2, ShenandoahHeap::WEAK_ROOTS);
    __ bne(CCR0, skip_fastpath);

    __ andi_(tmp1, tmp2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION);
    __ beq(CCR0, skip_barrier);
#ifdef ASSERT
    // "evacuation" -> (implies) "has forwarded". If we reach this code, "has forwarded" must thus be set.
    __ andi_(tmp1, tmp1, ShenandoahHeap::HAS_FORWARDED);
    __ asm_assert_ne("'has forwarded' is missing");
#endif // ASSERT

    __ bind(skip_fastpath);
  }

  /* ==== Check whether region is in collection set ==== */
  if (is_strong) {
    // Shenandoah stores metadata on regions in a contiguous area of memory in which a single byte corresponds to
    // an entire region of the shenandoah heap. At present, only the least significant bit is meaningful: it
    // indicates whether the region is part of the collection set.
    //
    // All regions are of the same size and are always aligned by a power of two.
    // Any address can thus be shifted by a fixed number of bits to retrieve the address prefix shared by
    // all objects within that region (region identification bits).
    //
    //  | unused bits | region identification bits | object identification bits |
    //
    // (The region size depends on several factors, such as the page size, user-provided arguments and the
    // maximum heap size. The number of object identification bits can thus not be determined at compile time.)
    //
    // -------------------------------------------------------  <--- cs (collection set) base address
    // | lost space due to heap space base address               -> 'ShenandoahHeap::in_cset_fast_test_addr()'
    // |   (region identification bits contain heap base offset)
    // |------------------------------------------------------  <--- cs base address + (heap_base >> region size shift)
    // | collection set proper                                   -> shift: 'region_size_bytes_shift_jint()'
    // |
    // |------------------------------------------------------  <--- cs base address + (heap_base >> region size shift)
    //                                                                                + number of regions
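    //
    // Worked example (hypothetical values): with 1 MiB regions, 'region_size_bytes_shift_jint()' is 20,
    // so the object at address 'dst' is covered by the byte at 'in_cset_fast_test_addr() + (dst >> 20)',
    // whose least significant bit is tested below.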
    __ load_const_optimized(tmp2, ShenandoahHeap::in_cset_fast_test_addr(), tmp1);
    __ srdi(tmp1, dst, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ lbzx(tmp2, tmp1, tmp2);
    __ andi_(tmp2, tmp2, 1);
    __ beq(CCR0, skip_barrier);
  }

  /* ==== Invoke runtime ==== */
  // Save to-be-preserved registers.
  int nbytes_save = 0;

  const bool needs_frame           = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR;
  const bool preserve_gp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_REGS;
  const bool preserve_fp_registers = preservation_level >= MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS;

  if (needs_frame) {
    if (preserve_gp_registers) {
      nbytes_save = (preserve_fp_registers
                     ? MacroAssembler::num_volatile_gp_regs + MacroAssembler::num_volatile_fp_regs
                     : MacroAssembler::num_volatile_gp_regs) * BytesPerWord;
      __ save_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers);
    }

    __ save_LR_CR(tmp1);
    __ push_frame_reg_args(nbytes_save, tmp1);
  }

  // Calculate the reference's absolute address.
  __ add(R4_ARG2, ind_or_offs, base);

  // Invoke runtime.
  address jrt_address = nullptr;

  if (is_strong) {
    if (is_narrow) {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
    } else {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    }
  } else if (is_weak) {
    if (is_narrow) {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  assert(jrt_address != nullptr, "jrt routine cannot be found");

  __ call_VM_leaf(jrt_address, dst /* reference */, R4_ARG2 /* reference address */);

  // Restore to-be-preserved registers.
  if (preserve_gp_registers) {
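    // Stash the returned reference in R0: 'restore_volatile_gprs' below restores 'R3_RET'
    // and would otherwise clobber the result.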
    __ mr(R0, R3_RET);
  } else {
    __ mr_if_needed(dst, R3_RET);
  }

  if (needs_frame) {
    __ pop_frame();
    __ restore_LR_CR(tmp1);

    if (preserve_gp_registers) {
      __ restore_volatile_gprs(R1_SP, -nbytes_save, preserve_fp_registers);
      __ mr(dst, R0);
    }
  }

  __ bind(skip_barrier);
}

// base:          Base register of the reference's address.
// ind_or_offs:   Index or offset of the reference's address.
// L_handle_null: An optional label that will be jumped to if the reference is null.
void ShenandoahBarrierSetAssembler::load_at(
    MacroAssembler *masm, DecoratorSet decorators, BasicType type,
    Register base, RegisterOrConstant ind_or_offs, Register dst,
    Register tmp1, Register tmp2,
    MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null) {
  // Registers must not clash, except for 'base' and 'dst'.
  if (ind_or_offs.is_register()) {
    if (base != noreg) {
      assert_different_registers(tmp1, tmp2, base, ind_or_offs.register_or_noreg(), R0, noreg);
    }
    assert_different_registers(tmp1, tmp2, dst, ind_or_offs.register_or_noreg(), R0, noreg);
  } else {
    if (base != noreg) {
      assert_different_registers(tmp1, tmp2, base, R0, noreg);
    }
    assert_different_registers(tmp1, tmp2, dst, R0, noreg);
  }

  /* ==== Apply load barrier, if required ==== */
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    assert(is_reference_type(type), "need_load_reference_barrier must check whether type is a reference type");

    // If 'dst' clashes with either 'base' or 'ind_or_offs', use an intermediate result register
    // to keep those values alive until the load reference barrier has been applied.
    Register intermediate_dst = (dst == base || (ind_or_offs.is_register() && dst == ind_or_offs.as_register()))
                                ? tmp2
                                : dst;

    BarrierSetAssembler::load_at(masm, decorators, type,
                                 base, ind_or_offs,
                                 intermediate_dst,
                                 tmp1, noreg,
                                 preservation_level, L_handle_null);

    load_reference_barrier(masm, decorators,
                           base, ind_or_offs,
                           intermediate_dst,
                           tmp1, R0,
                           preservation_level);

    __ mr_if_needed(dst, intermediate_dst);
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type,
                                 base, ind_or_offs,
                                 dst,
                                 tmp1, tmp2,
                                 preservation_level, L_handle_null);
  }

  /* ==== Apply keep-alive barrier, if required (e.g., to inhibit weak reference resurrection) ==== */
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    if (ShenandoahSATBBarrier) {
      __ block_comment("keep_alive_barrier (shenandoahgc) {");
      satb_write_barrier_impl(masm, 0, noreg, noreg, dst, tmp1, tmp2, preservation_level);
      __ block_comment("} keep_alive_barrier (shenandoahgc)");
    }
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register base, RegisterOrConstant ind_or_offs, Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");
  assert_different_registers(base, tmp, R0);
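  // Note: 'base' is clobbered below; the effective address and card index are computed in place.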

  if (ind_or_offs.is_constant()) {
    __ add_const_optimized(base, base, ind_or_offs.as_constant(), tmp);
  } else {
    __ add(base, ind_or_offs.as_register(), base);
  }

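  // Card-marking sketch: the byte at 'card_table_base + (effective_address >> card_shift())' is
  // set to 'dirty_card_val()'. (Illustrative: with the common 512-byte cards, card_shift() is 9;
  // the exact values depend on the card table configuration.)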
  __ ld(tmp, in_bytes(ShenandoahThreadLocalData::card_table_offset()), R16_thread); /* tmp = *[R16_thread + card_table_offset] */
  __ srdi(base, base, CardTable::card_shift());
  __ li(R0, CardTable::dirty_card_val());
  __ stbx(R0, tmp, base);
}

// base:        Base register of the reference's address.
// ind_or_offs: Index or offset of the reference's address.
// val:         To-be-stored value/reference's new value.
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler *masm, DecoratorSet decorators, BasicType type,
                                             Register base, RegisterOrConstant ind_or_offs, Register val,
                                             Register tmp1, Register tmp2, Register tmp3,
                                             MacroAssembler::PreservationLevel preservation_level) {
  if (is_reference_type(type)) {
    if (ShenandoahSATBBarrier) {
      satb_write_barrier(masm, base, ind_or_offs, tmp1, tmp2, tmp3, preservation_level);
    }
  }

  BarrierSetAssembler::store_at(masm, decorators, type,
                                base, ind_or_offs,
                                val,
                                tmp1, tmp2, tmp3,
                                preservation_level);

  // No need for a post barrier when storing null ('val == noreg' encodes a null store).
  if (ShenandoahCardBarrier && is_reference_type(type) && val != noreg) {
    store_check(masm, base, ind_or_offs, tmp1);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler *masm,
                                                                  Register dst, Register jni_env, Register obj,
                                                                  Register tmp, Label &slowpath) {
  __ block_comment("try_resolve_jobject_in_native (shenandoahgc) {");

  assert_different_registers(jni_env, obj, tmp);

  Label done;

  // Fast path: Reference is null (JNI tags are zero for null pointers).
  __ cmpdi(CCR0, obj, 0);
  __ beq(CCR0, done);

  // Resolve jobject using standard implementation.
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);

  // Check whether heap is stable.
  __ lbz(tmp,
         in_bytes(ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset()),
         jni_env);

  __ andi_(tmp, tmp, ShenandoahHeap::EVACUATION | ShenandoahHeap::HAS_FORWARDED);
  __ bne(CCR0, slowpath);

  __ bind(done);
  __ block_comment("} try_resolve_jobject_in_native (shenandoahgc)");
}

// Special shenandoah CAS implementation that handles false negatives due
// to concurrent evacuation. That is, the CAS operation is intended to succeed in
// the following scenarios (success criteria):
//  s1) The reference pointer ('base_addr') equals the expected ('expected') pointer.
//  s2) The reference pointer refers to the from-space version of an already-evacuated
//      object, whereas the expected pointer refers to the to-space version of the same object.
// Situations in which the reference pointer refers to the to-space version of an object
// while the expected pointer refers to the from-space version of the same object cannot occur due to
// shenandoah's strong to-space invariant. This also implies that the reference stored in 'new_val'
// cannot refer to the from-space version of an already-evacuated object.
//
// To guarantee correct behavior in concurrent environments, two races must be addressed:
//  r1) A concurrent thread may heal the reference pointer (i.e., it is no longer referring to the
//      from-space version but to the to-space version of the object in question).
//      In this case, the CAS operation should succeed.
//  r2) A concurrent thread may mutate the reference (i.e., the reference pointer refers to an entirely different object).
//      In this case, the CAS operation should fail.
//
// Unless 'is_cae' is set, the value held in the 'result' register is zero to indicate CAS failure and
// non-zero to indicate success. If 'is_cae' is set, 'result' instead holds the most recently fetched
// value from 'base_addr' (compare-and-exchange semantics).
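//
// A C-like sketch of the algorithm implemented below (names are illustrative only):
//
//   step1: current = CAS(base_addr, expected, new_val);
//          if (current == expected) goto done;                     // plain success (s1)
//   step2: if (current == nullptr) goto done;                      // null is never forwarded: legitimate failure
//   step3: if (resolve_forward(current) != expected) goto done;    // unrelated object: legitimate failure (r2)
//          CAS(base_addr, current /* from-space ptr */, new_val);  // retry against the from-space pointer (s2)
//   step4: if (!succeeded) goto step1;                             // pointer may have been healed concurrently (r1)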
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register base_addr,
                                                Register expected, Register new_val, Register tmp1, Register tmp2,
                                                bool is_cae, Register result) {
  __ block_comment("cmpxchg_oop (shenandoahgc) {");

  assert_different_registers(base_addr, new_val, tmp1, tmp2, result, R0);
  assert_different_registers(base_addr, expected, tmp1, tmp2, result, R0);

  // A potential clash of 'success_flag' and 'tmp' is accounted for by this assignment.
  Register success_flag  = is_cae ? noreg  : result,
           current_value = is_cae ? result : tmp1,
           tmp           = is_cae ? tmp1   : result,
           initial_value = tmp2;

  Label done, step_four;

  __ bind(step_four);

  /* ==== Step 1 ("Standard" CAS) ==== */
  // Fast path: The values stored in 'expected' and 'base_addr' are equal.
  // Given that 'expected' must refer to the to-space object of an evacuated object (strong to-space invariant),
  // no special processing is required.
  if (UseCompressedOops) {
    __ cmpxchgw(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
                false, success_flag, true);
  } else {
    __ cmpxchgd(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
                false, success_flag, nullptr, true);
  }

  // Skip the rest of the barrier if the CAS operation succeeds immediately.
  // If it does not, the value stored at the address is either the from-space pointer of the
  // referenced object (success criterion s2)) or simply another object.
  __ beq(CCR0, done);

  /* ==== Step 2 (Null check) ==== */
  // Success criterion s2) cannot be met with a null pointer
  // (null pointers are never subject to concurrent evacuation). The failure of the CAS operation is thus legitimate.
  __ cmpdi(CCR0, current_value, 0);
  __ beq(CCR0, done);

  /* ==== Step 3 (reference pointer refers to from-space version; success criterion s2)) ==== */
  // To check whether the reference pointer refers to the from-space version, the forward
  // pointer of the object referred to by the reference is resolved and compared against the expected pointer.
  // If this check succeeds, another CAS operation is issued with the from-space pointer being the expected pointer.
  //
  // Save the potential from-space pointer.
  __ mr(initial_value, current_value);

  // Resolve forward pointer.
  if (UseCompressedOops) { __ decode_heap_oop_not_null(current_value); }
  resolve_forward_pointer_not_null(masm, current_value, tmp);
  if (UseCompressedOops) { __ encode_heap_oop_not_null(current_value); }

  if (!is_cae) {
    // 'success_flag' was overwritten by the call to 'resolve_forward_pointer_not_null'.
    // Load zero into register for the potential failure case.
    __ li(success_flag, 0);
  }
  __ cmpd(CCR0, current_value, expected);
  __ bne(CCR0, done);

  // Discard fetched value as it might be a reference to the from-space version of an object.
  if (UseCompressedOops) {
    __ cmpxchgw(CCR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
                false, success_flag);
  } else {
    __ cmpxchgd(CCR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
                false, success_flag);
  }

  /* ==== Step 4 (Retry CAS with to-space pointer (success criterion s2) under race r1)) ==== */
  // The reference pointer could have been healed whilst the previous CAS operation was being performed.
  // Another CAS operation must thus be issued with the to-space pointer being the expected pointer.
  // If that CAS operation fails as well, race r2) must have occurred, indicating that
  // the operation failure is legitimate.
  //
  // To keep the code small and thus improve instruction cache (icache) performance, this highly
  // unlikely case is handled by the smallest possible amount of code. Instead of emitting a third,
  // explicit CAS operation, the code jumps back and reuses the first CAS operation (step 1)
  // (passed arguments are identical).
  //
  // A failure of the CAS operation in step 1 would imply that the overall CAS operation is supposed
  // to fail. Jumping back to step 1 requires, however, that step 2 and step 3 are re-executed as well.
  // It is thus important to ensure that a re-execution of those steps does not put program correctness
  // at risk:
  // - Step 2: Either terminates in failure (desired result) or falls through to step 3.
  // - Step 3: Terminates if the comparison between the forwarded, fetched pointer and the expected value
  //           fails. Unless the reference has been updated once again in the meantime, this is
  //           guaranteed to be the case.
  //           In case of a concurrent update, the CAS would be retried again. This is legitimate
  //           in terms of program correctness (even though it is not desired).
  __ bne(CCR0, step_four);

  __ bind(done);
  __ block_comment("} cmpxchg_oop (shenandoahgc)");
}

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count, Register preserve) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = bs->card_table();
  assert_different_registers(addr, count, R0);

  Label L_skip_loop, L_store_loop;

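  // Dirty every card covering '[addr, addr + count * BytesPerHeapOop)': compute the card indices
  // of the first and last covered oop, derive the number of card-table bytes from their difference,
  // and store the dirty value into each of them.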
  __ sldi_(count, count, LogBytesPerHeapOop);

  // Zero length? Skip.
  __ beq(CCR0, L_skip_loop);

  __ addi(count, count, -BytesPerHeapOop);
  __ add(count, addr, count);
  // The two shifts below compute the card indices of the first and the last covered oop.
  __ srdi(addr, addr, CardTable::card_shift());
  __ srdi(count, count, CardTable::card_shift());
  __ subf(count, addr, count);
  __ ld(R0, in_bytes(ShenandoahThreadLocalData::card_table_offset()), R16_thread);
  __ add(addr, addr, R0);
  __ addi(count, count, 1);
  __ li(R0, 0);
  __ mtctr(count);

  // Byte store loop
  __ bind(L_store_loop);
  __ stb(R0, 0, addr);
  __ addi(addr, addr, 1);
  __ bdnz(L_store_loop);
  __ bind(L_skip_loop);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler *ce, ShenandoahPreBarrierStub *stub) {
  __ block_comment("gen_pre_barrier_stub (shenandoahgc) {");

  ShenandoahBarrierSetC1 *bs = (ShenandoahBarrierSetC1*) BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  // GC status has already been verified by 'ShenandoahBarrierSetC1::pre_barrier'.
  // This stub is the slowpath of that function.

  assert(stub->pre_val()->is_register(), "pre_val must be a register");
  Register pre_val = stub->pre_val()->as_register();

  // If 'do_load()' returns false, the previous value is already available in 'stub->pre_val()'
  // ("preloaded mode" of the pre-barrier).
  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false);
  }

  // Fast path: Reference is null.
  __ cmpdi(CCR0, pre_val, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CCR0, Assembler::equal), *stub->continuation());

  // Argument passing via the stack.
  __ std(pre_val, -8, R1_SP);

  __ load_const_optimized(R0, bs->pre_barrier_c1_runtime_code_blob()->code_begin());
  __ call_stub(R0);

  __ b(*stub->continuation());
  __ block_comment("} gen_pre_barrier_stub (shenandoahgc)");
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler *ce,
                                                                    ShenandoahLoadReferenceBarrierStub *stub) {
  __ block_comment("gen_load_reference_barrier_stub (shenandoahgc) {");

  ShenandoahBarrierSetC1 *bs = (ShenandoahBarrierSetC1*) BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  Register obj  = stub->obj()->as_register();
  Register res  = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(addr, res, tmp1, tmp2);

#ifdef ASSERT
  // Ensure that 'res' is 'R3_RET' (== 'R3_ARG1') and contains the same value as 'obj' to reduce the
  // number of required copy instructions.
  assert(R3_RET == res, "res must be R3");
  __ cmpd(CCR0, res, obj);
  __ asm_assert_eq("result register must contain the reference stored in obj");
#endif

  DecoratorSet decorators = stub->decorators();

  /* ==== Check whether region is in collection set ==== */
  // GC status (unstable) has already been verified by 'ShenandoahBarrierSetC1::load_reference_barrier_impl'.
  // This stub is the slowpath of that function.

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  if (is_strong) {
    // Check whether object is in collection set.
    __ load_const_optimized(tmp2, ShenandoahHeap::in_cset_fast_test_addr(), tmp1);
    __ srdi(tmp1, obj, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ lbzx(tmp2, tmp1, tmp2);

    __ andi_(tmp2, tmp2, 1);
    __ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CCR0, Assembler::equal), *stub->continuation());
  }

  address blob_addr = nullptr;

  if (is_strong) {
    if (is_native) {
      blob_addr = bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin();
    } else {
      blob_addr = bs->load_reference_barrier_strong_rt_code_blob()->code_begin();
    }
  } else if (is_weak) {
    blob_addr = bs->load_reference_barrier_weak_rt_code_blob()->code_begin();
  } else {
    assert(is_phantom, "only remaining strength");
    blob_addr = bs->load_reference_barrier_phantom_rt_code_blob()->code_begin();
  }

  assert(blob_addr != nullptr, "code blob cannot be found");

  // Argument passing via the stack. 'obj' is passed implicitly (as asserted above).
  __ std(addr, -8, R1_SP);

  __ load_const_optimized(tmp1, blob_addr, tmp2);
  __ call_stub(tmp1);

  // 'res' is 'R3_RET'. The result is thus already in the correct register.

  __ b(*stub->continuation());
  __ block_comment("} gen_load_reference_barrier_stub (shenandoahgc)");
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler *sasm) {
  __ block_comment("generate_c1_pre_barrier_runtime_stub (shenandoahgc) {");

  Label runtime, skip_barrier;
  BarrierSet *bs = BarrierSet::barrier_set();

  // Argument passing via the stack.
  const int caller_stack_slots = 3;
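  // (Three 64-bit slots below the caller's SP: the 'pre_val' argument at -8 and the spill slots
  //  for the two temporaries at -16 and -24, matching the loads and stores below.)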

  Register R0_pre_val = R0;
  __ ld(R0, -8, R1_SP);
  Register R11_tmp1 = R11_scratch1;
  __ std(R11_tmp1, -16, R1_SP);
  Register R12_tmp2 = R12_scratch2;
  __ std(R12_tmp2, -24, R1_SP);

  /* ==== Check whether marking is active ==== */
  // Even though gc status was checked in 'ShenandoahBarrierSetAssembler::gen_pre_barrier_stub',
  // another check is required as a safepoint might have been reached in the meantime (JDK-8140588).
  __ lbz(R12_tmp2, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);

  __ andi_(R12_tmp2, R12_tmp2, ShenandoahHeap::MARKING);
  __ beq(CCR0, skip_barrier);

  /* ==== Add previous value directly to thread-local SATB mark queue ==== */
  // Check queue's capacity. Jump to runtime if no free slot is available.
  __ ld(R12_tmp2, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
  __ cmpdi(CCR0, R12_tmp2, 0);
  __ beq(CCR0, runtime);

  // Capacity suffices. Decrement the queue's size by one slot (size of one oop).
  __ addi(R12_tmp2, R12_tmp2, -wordSize);
  __ std(R12_tmp2, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);

  // Enqueue the previous value and skip the runtime invocation.
  __ ld(R11_tmp1, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()), R16_thread);
  __ stdx(R0_pre_val, R11_tmp1, R12_tmp2);
  __ b(skip_barrier);

  __ bind(runtime);

  /* ==== Invoke runtime to commit SATB mark queue to gc and allocate a new buffer ==== */
  // Save to-be-preserved registers.
  const int nbytes_save = (MacroAssembler::num_volatile_regs + caller_stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save);
  __ save_LR_CR(R11_tmp1);
  __ push_frame_reg_args(nbytes_save, R11_tmp1);

  // Invoke runtime.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), R0_pre_val, R16_thread);

  // Restore to-be-preserved registers.
  __ pop_frame();
  __ restore_LR_CR(R11_tmp1);
  __ restore_volatile_gprs(R1_SP, -nbytes_save);

  __ bind(skip_barrier);

  // Restore spilled registers.
  __ ld(R11_tmp1, -16, R1_SP);
  __ ld(R12_tmp2, -24, R1_SP);

  __ blr();
  __ block_comment("} generate_c1_pre_barrier_runtime_stub (shenandoahgc)");
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler *sasm,
                                                                                    DecoratorSet decorators) {
  __ block_comment("generate_c1_load_reference_barrier_runtime_stub (shenandoahgc) {");

  // Argument passing via the stack.
  const int caller_stack_slots = 1;

  // Save to-be-preserved registers.
  const int nbytes_save = (MacroAssembler::num_volatile_regs - 1 // 'R3_ARG1' is skipped
                           + caller_stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save, true, false);

  // Load arguments from stack.
  // No load is required for 'obj' (already in 'R3_ARG1'), as assured by assertions in
  // 'ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub'.
  Register R3_obj = R3_ARG1;
  Register R4_load_addr = R4_ARG2;
  __ ld(R4_load_addr, -8, R1_SP);

  Register R11_tmp = R11_scratch1;

  /* ==== Invoke runtime ==== */
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  address jrt_address = nullptr;

  if (is_strong) {
    if (is_native) {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
    } else {
      if (UseCompressedOops) {
        jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else {
        jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak load reference barrier must not be called off-heap");
    if (UseCompressedOops) {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
    } else {
      jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
    }
  } else {
    assert(is_phantom, "reference type must be phantom");
    assert(is_native, "phantom load reference barrier must be called off-heap");
    jrt_address = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
  }
  assert(jrt_address != nullptr, "load reference barrier runtime routine cannot be found");

  __ save_LR_CR(R11_tmp);
  __ push_frame_reg_args(nbytes_save, R11_tmp);

  // Invoke runtime. Arguments are already stored in the corresponding registers.
  __ call_VM_leaf(jrt_address, R3_obj, R4_load_addr);

  // Restore to-be-preserved registers.
  __ pop_frame();
  __ restore_LR_CR(R11_tmp);
  __ restore_volatile_gprs(R1_SP, -nbytes_save, true, false); // Skip 'R3_RET' register.

  __ blr();
  __ block_comment("} generate_c1_load_reference_barrier_runtime_stub (shenandoahgc)");
}

#undef __

#endif // COMPILER1