1 /*
2 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
27 #include "gc/shenandoah/mode/shenandoahMode.hpp"
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
30 #include "gc/shenandoah/shenandoahForwarding.hpp"
31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
32 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
33 #include "gc/shenandoah/shenandoahRuntime.hpp"
34 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "runtime/javaThread.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "utilities/macros.hpp"
39 #ifdef COMPILER1
40 #include "c1/c1_LIRAssembler.hpp"
41 #include "c1/c1_MacroAssembler.hpp"
42 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
43 #endif
44 #ifdef COMPILER2
45 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
46 #endif
47
48 #define __ masm->
49
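// Saves/restores caller-saved machine state around runtime calls emitted by
// the barriers below. GPR state is handled via push_IU_state()/pop_IU_state();
// FP state covers the eight Java FP argument registers (xmm0-xmm7), which can
// be live when a barrier is reached from the c2i adapter.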
50 static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
51 if (handle_gpr) {
52 __ push_IU_state();
53 }
54
55 if (handle_fp) {
56 // Some paths can be reached from the c2i adapter with live fp arguments in registers.
57 assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call");
58
59 const int xmm_size = wordSize * 2;
60 __ subptr(rsp, xmm_size * 8);
61 __ movdbl(Address(rsp, xmm_size * 0), xmm0);
62 __ movdbl(Address(rsp, xmm_size * 1), xmm1);
63 __ movdbl(Address(rsp, xmm_size * 2), xmm2);
64 __ movdbl(Address(rsp, xmm_size * 3), xmm3);
65 __ movdbl(Address(rsp, xmm_size * 4), xmm4);
66 __ movdbl(Address(rsp, xmm_size * 5), xmm5);
67 __ movdbl(Address(rsp, xmm_size * 6), xmm6);
68 __ movdbl(Address(rsp, xmm_size * 7), xmm7);
69 }
70 }
71
72 static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
73 if (handle_fp) {
74 const int xmm_size = wordSize * 2;
75 __ movdbl(xmm0, Address(rsp, xmm_size * 0));
76 __ movdbl(xmm1, Address(rsp, xmm_size * 1));
77 __ movdbl(xmm2, Address(rsp, xmm_size * 2));
78 __ movdbl(xmm3, Address(rsp, xmm_size * 3));
79 __ movdbl(xmm4, Address(rsp, xmm_size * 4));
80 __ movdbl(xmm5, Address(rsp, xmm_size * 5));
81 __ movdbl(xmm6, Address(rsp, xmm_size * 6));
82 __ movdbl(xmm7, Address(rsp, xmm_size * 7));
83 __ addptr(rsp, xmm_size * 8);
84 }
85
86 if (handle_gpr) {
87 __ pop_IU_state();
88 }
89 }
90
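// Arraycopy prologue: for oop arrays, stash the inputs that the card-marking
// epilogue will need (the copy stub clobbers them), and call into the runtime
// to apply SATB/LRB barriers to the source array when marking is active or
// forwarded objects exist.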
91 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
92 Register src, Register dst, Register count) {
93
94 bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
95
96 if (is_reference_type(type)) {
97 if (ShenandoahCardBarrier) {
98 bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
99 bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
100 bool obj_int = (type == T_OBJECT) && UseCompressedOops;
101
102 // We need to save the original element count because the array copy stub
103 // will destroy the value and we need it for the card marking barrier.
104 if (!checkcast) {
105 if (!obj_int) {
106 // Save count for barrier
107 __ movptr(r11, count);
108 } else if (disjoint) {
109 // Save dst in r11 in the disjoint case
110 __ movq(r11, dst);
111 }
112 }
113 }
114
115 if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
116 Register thread = r15_thread;
117 assert_different_registers(src, dst, count, thread);
118
119 Label L_done;
120 // Short-circuit if count == 0.
121 __ testptr(count, count);
122 __ jcc(Assembler::zero, L_done);
123
124 // Avoid runtime call when not active.
125 Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
126 int flags;
127 if (ShenandoahSATBBarrier && dest_uninitialized) {
128 flags = ShenandoahHeap::HAS_FORWARDED;
129 } else {
130 flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
131 }
132 __ testb(gc_state, flags);
133 __ jcc(Assembler::zero, L_done);
134
135 save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);
136
137 assert(src == rdi, "expected");
138 assert(dst == rsi, "expected");
139 assert(count == rdx, "expected");
140 if (UseCompressedOops) {
141 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop),
142 src, dst, count);
143 } else {
144 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop),
145 src, dst, count);
146 }
147
148 restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);
149
150 __ bind(L_done);
151 }
152 }
153
154 }
155
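// Arraycopy epilogue: emit the card-marking post-barrier for oop arrays,
// picking up the count (or dst) that the prologue stashed in r11, since the
// copy stub has clobbered the original registers.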
156 void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
157 Register src, Register dst, Register count) {
158
159 if (ShenandoahCardBarrier && is_reference_type(type)) {
160 bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
161 bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
162 bool obj_int = (type == T_OBJECT) && UseCompressedOops;
163 Register tmp = rax;
164
165 if (!checkcast) {
166 if (!obj_int) {
167 // Save count for barrier
168 count = r11;
169 } else if (disjoint) {
170 // Use the saved dst in the disjoint case
171 dst = r11;
172 }
173 } else {
174 tmp = rscratch1;
175 }
176 gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
177 }
178 }
179
180 void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
181 Register obj,
182 Register pre_val,
183 Register tmp,
184 bool tosca_live,
185 bool expand_call) {
186
187 if (ShenandoahSATBBarrier) {
188 satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
189 }
190 }
191
192 void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
193 Register obj,
194 Register pre_val,
195 Register tmp,
196 bool tosca_live,
197 bool expand_call) {
198 // If expand_call is true then we expand the call_VM_leaf macro
199 // directly to skip generating the check by
200 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
201
202 const Register thread = r15_thread;
203
204 Label done;
205 Label runtime;
206
207 assert(pre_val != noreg, "check this code");
208
209 if (obj != noreg) {
210 assert_different_registers(obj, pre_val, tmp);
211 assert(pre_val != rax, "check this code");
212 }
213
214 Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
215 Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
216
217 Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
218 __ testb(gc_state, ShenandoahHeap::MARKING);
219 __ jcc(Assembler::zero, done);
220
221 // Do we need to load the previous value?
222 if (obj != noreg) {
223 __ load_heap_oop(pre_val, Address(obj, 0), noreg, AS_RAW);
224 }
225
226 // Is the previous value null?
227 __ cmpptr(pre_val, NULL_WORD);
228 __ jcc(Assembler::equal, done);
229
230 // Can we store original value in the thread's buffer?
231 // Is index == 0?
232 // (The index field is typed as size_t.)
233
234 __ movptr(tmp, index); // tmp := *index_adr
235 __ cmpptr(tmp, 0); // tmp == 0?
236 __ jcc(Assembler::equal, runtime); // If yes, goto runtime
237
238 __ subptr(tmp, wordSize); // tmp := tmp - wordSize
239 __ movptr(index, tmp); // *index_adr := tmp
240 __ addptr(tmp, buffer); // tmp := tmp + *buffer_adr
241
242 // Record the previous value
243 __ movptr(Address(tmp, 0), pre_val);
244 __ jmp(done);
245
246 __ bind(runtime);
247 // save the live input values
  if (tosca_live) {
    __ push(rax);
  }
  if (obj != noreg && obj != rax) {
    __ push(obj);
  }
  if (pre_val != rax) {
    __ push(pre_val);
  }
255
  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.
267
268 // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
269 // pre_val be c_rarg1 (where the call prologue would copy thread argument).
270 // Note: this should not accidentally smash thread, because thread is always r15.
271 assert(thread != c_rarg0, "smashed arg");
272 if (c_rarg0 != pre_val) {
273 __ mov(c_rarg0, pre_val);
274 }
275
276 if (expand_call) {
277 assert(pre_val != c_rarg1, "smashed arg");
278 if (c_rarg1 != thread) {
279 __ mov(c_rarg1, thread);
280 }
281 // Already moved pre_val into c_rarg0 above
282 __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), 1);
283 } else {
284 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
285 }
286
  // restore the live input values
  if (pre_val != rax) {
    __ pop(pre_val);
  }
  if (obj != noreg && obj != rax) {
    __ pop(obj);
  }
  if (tosca_live) {
    __ pop(rax);
  }
295
296 __ bind(done);
297 }
298
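// Applies the load-reference barrier to the oop in dst, loaded from src.
// Fast path: skip everything when the heap is stable; for strong accesses,
// additionally skip when the object is not in the collection set. Slow path:
// save the call-clobbered state and call the runtime LRB entry matching the
// access strength, which returns the (possibly evacuated) to-space copy in dst.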
299 void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
300 assert(ShenandoahLoadRefBarrier, "Should be enabled");
301
302 bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
303 bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
304 bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
305 bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
306 bool is_narrow = UseCompressedOops && !is_native;
307
308 Label heap_stable, not_cset;
309
310 __ block_comment("load_reference_barrier { ");
311
312 // Check if GC is active
313 Register thread = r15_thread;
314
315 Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
316 int flags = ShenandoahHeap::HAS_FORWARDED;
317 if (!is_strong) {
318 flags |= ShenandoahHeap::WEAK_ROOTS;
319 }
320 __ testb(gc_state, flags);
321 __ jcc(Assembler::zero, heap_stable);
322
323 Register tmp1 = noreg, tmp2 = noreg;
324 if (is_strong) {
325 // Test for object in cset
326 // Allocate temporary registers
327 for (int i = 0; i < 8; i++) {
328 Register r = as_Register(i);
329 if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
330 if (tmp1 == noreg) {
331 tmp1 = r;
332 } else {
333 tmp2 = r;
334 break;
335 }
336 }
337 }
338 assert(tmp1 != noreg, "tmp1 allocated");
339 assert(tmp2 != noreg, "tmp2 allocated");
340 assert_different_registers(tmp1, tmp2, src.base(), src.index());
341 assert_different_registers(tmp1, tmp2, dst);
342
343 __ push(tmp1);
344 __ push(tmp2);
345
346 // Optimized cset-test
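    // Region index = (dst >> region-size shift); the in-cset table holds one
    // byte per region, so a single byte load tells us whether the region is
    // in the collection set.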
347 __ movptr(tmp1, dst);
348 __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
349 __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
350 __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
351 __ testbool(tmp1);
352 __ jcc(Assembler::zero, not_cset);
353 }
354
355 save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);
356
  // The remaining (GPR) state is saved manually below, on this slow path only.
358
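  // Slots: rcx, rdx, rdi, rsi (+ rax if dst != rax), r8-r11, and r16-r31 when
  // APX is enabled. rbx and r12-r15 are callee-saved across the call.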
359 uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
360 __ subptr(rsp, num_saved_regs * wordSize);
361 uint slot = num_saved_regs;
362 if (dst != rax) {
363 __ movptr(Address(rsp, (--slot) * wordSize), rax);
364 }
365 __ movptr(Address(rsp, (--slot) * wordSize), rcx);
366 __ movptr(Address(rsp, (--slot) * wordSize), rdx);
367 __ movptr(Address(rsp, (--slot) * wordSize), rdi);
368 __ movptr(Address(rsp, (--slot) * wordSize), rsi);
369 __ movptr(Address(rsp, (--slot) * wordSize), r8);
370 __ movptr(Address(rsp, (--slot) * wordSize), r9);
371 __ movptr(Address(rsp, (--slot) * wordSize), r10);
372 __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // Save APX extended registers r16-r31 if enabled
374 if (UseAPX) {
375 __ movptr(Address(rsp, (--slot) * wordSize), r16);
376 __ movptr(Address(rsp, (--slot) * wordSize), r17);
377 __ movptr(Address(rsp, (--slot) * wordSize), r18);
378 __ movptr(Address(rsp, (--slot) * wordSize), r19);
379 __ movptr(Address(rsp, (--slot) * wordSize), r20);
380 __ movptr(Address(rsp, (--slot) * wordSize), r21);
381 __ movptr(Address(rsp, (--slot) * wordSize), r22);
382 __ movptr(Address(rsp, (--slot) * wordSize), r23);
383 __ movptr(Address(rsp, (--slot) * wordSize), r24);
384 __ movptr(Address(rsp, (--slot) * wordSize), r25);
385 __ movptr(Address(rsp, (--slot) * wordSize), r26);
386 __ movptr(Address(rsp, (--slot) * wordSize), r27);
387 __ movptr(Address(rsp, (--slot) * wordSize), r28);
388 __ movptr(Address(rsp, (--slot) * wordSize), r29);
389 __ movptr(Address(rsp, (--slot) * wordSize), r30);
390 __ movptr(Address(rsp, (--slot) * wordSize), r31);
391 }
392 // r12-r15 are callee saved in all calling conventions
393 assert(slot == 0, "must use all slots");
394
395 // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
396 Register arg0 = c_rarg0, arg1 = c_rarg1;
397 if (dst == arg1) {
398 __ lea(arg0, src);
399 __ xchgptr(arg1, arg0);
400 } else {
401 __ lea(arg1, src);
402 __ movptr(arg0, dst);
403 }
404
405 if (is_strong) {
406 if (is_narrow) {
407 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
408 } else {
409 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
410 }
411 } else if (is_weak) {
412 if (is_narrow) {
413 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
414 } else {
415 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
416 }
417 } else {
418 assert(is_phantom, "only remaining strength");
419 assert(!is_narrow, "phantom access cannot be narrow");
420 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
421 }
422
  // Restore APX extended registers r31-r16 if previously saved
424 if (UseAPX) {
425 __ movptr(r31, Address(rsp, (slot++) * wordSize));
426 __ movptr(r30, Address(rsp, (slot++) * wordSize));
427 __ movptr(r29, Address(rsp, (slot++) * wordSize));
428 __ movptr(r28, Address(rsp, (slot++) * wordSize));
429 __ movptr(r27, Address(rsp, (slot++) * wordSize));
430 __ movptr(r26, Address(rsp, (slot++) * wordSize));
431 __ movptr(r25, Address(rsp, (slot++) * wordSize));
432 __ movptr(r24, Address(rsp, (slot++) * wordSize));
433 __ movptr(r23, Address(rsp, (slot++) * wordSize));
434 __ movptr(r22, Address(rsp, (slot++) * wordSize));
435 __ movptr(r21, Address(rsp, (slot++) * wordSize));
436 __ movptr(r20, Address(rsp, (slot++) * wordSize));
437 __ movptr(r19, Address(rsp, (slot++) * wordSize));
438 __ movptr(r18, Address(rsp, (slot++) * wordSize));
439 __ movptr(r17, Address(rsp, (slot++) * wordSize));
440 __ movptr(r16, Address(rsp, (slot++) * wordSize));
441 }
442 __ movptr(r11, Address(rsp, (slot++) * wordSize));
443 __ movptr(r10, Address(rsp, (slot++) * wordSize));
444 __ movptr(r9, Address(rsp, (slot++) * wordSize));
445 __ movptr(r8, Address(rsp, (slot++) * wordSize));
446 __ movptr(rsi, Address(rsp, (slot++) * wordSize));
447 __ movptr(rdi, Address(rsp, (slot++) * wordSize));
448 __ movptr(rdx, Address(rsp, (slot++) * wordSize));
449 __ movptr(rcx, Address(rsp, (slot++) * wordSize));
450
451 if (dst != rax) {
452 __ movptr(dst, rax);
453 __ movptr(rax, Address(rsp, (slot++) * wordSize));
454 }
455
456 assert(slot == num_saved_regs, "must use all slots");
457 __ addptr(rsp, num_saved_regs * wordSize);
458
459 restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);
460
461 __ bind(not_cset);
462
463 if (is_strong) {
464 __ pop(tmp2);
465 __ pop(tmp1);
466 }
467
468 __ bind(heap_stable);
469
470 __ block_comment("} load_reference_barrier");
471 }
472
473 //
474 // Arguments:
475 //
476 // Inputs:
477 // src: oop location, might be clobbered
478 // tmp1: scratch register, might not be valid.
479 //
480 // Output:
481 // dst: oop loaded from src location
482 //
483 // Kill:
484 // tmp1 (if it is valid)
485 //
486 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
487 Register dst, Address src, Register tmp1) {
488 // 1: non-reference load, no additional barrier is needed
489 if (!is_reference_type(type)) {
490 BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
491 return;
492 }
493
494 assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");
495
496 // 2: load a reference from src location and apply LRB if needed
497 if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
498 Register result_dst = dst;
499 bool use_tmp1_for_dst = false;
500
501 // Preserve src location for LRB
502 if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
504 if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
505 dst = tmp1;
506 use_tmp1_for_dst = true;
507 } else {
508 dst = rdi;
509 __ push(dst);
510 }
511 assert_different_registers(dst, src.base(), src.index());
512 }
513
514 BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
515
516 load_reference_barrier(masm, dst, src, decorators);
517
518 // Move loaded oop to final destination
519 if (dst != result_dst) {
520 __ movptr(result_dst, dst);
521
522 if (!use_tmp1_for_dst) {
523 __ pop(dst);
524 }
525
526 dst = result_dst;
527 }
528 } else {
529 BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
530 }
531
532 // 3: apply keep-alive barrier if needed
533 if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
534 save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
535
536 assert_different_registers(dst, tmp1, r15_thread);
537 // Generate the SATB pre-barrier code to log the value of
538 // the referent field in an SATB buffer.
539 shenandoah_write_barrier_pre(masm /* masm */,
540 noreg /* obj */,
541 dst /* pre_val */,
542 tmp1 /* tmp */,
543 true /* tosca_live */,
544 true /* expand_call */);
545
546 restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
547 }
548 }
549
550 void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
551 assert(ShenandoahCardBarrier, "Should have been checked by caller");
552
553 // Does a store check for the oop in register obj. The content of
554 // register obj is destroyed afterwards.
555 __ shrptr(obj, CardTable::card_shift());
556
557 // We'll use this register as the TLS base address and also later on
558 // to hold the byte_map_base.
559 Register thread = r15_thread;
560 Register tmp = rscratch1;
561
562 Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
563 __ movptr(tmp, curr_ct_holder_addr);
564 Address card_addr(tmp, obj, Address::times_1);
565
566 int dirty = CardTable::dirty_card_val();
567 if (UseCondCardMark) {
568 Label L_already_dirty;
569 __ cmpb(card_addr, dirty);
570 __ jccb(Assembler::equal, L_already_dirty);
571 __ movb(card_addr, dirty);
572 __ bind(L_already_dirty);
573 } else {
574 __ movb(card_addr, dirty);
575 }
576 }
577
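// Stores val at dst, wrapping the plain store with the SATB pre-barrier
// (logs the previous value when marking is active) and, when
// ShenandoahCardBarrier is on, the card-marking post-barrier.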
578 void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
579 Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
580
581 bool on_oop = is_reference_type(type);
582 bool in_heap = (decorators & IN_HEAP) != 0;
583 bool as_normal = (decorators & AS_NORMAL) != 0;
584 if (on_oop && in_heap) {
585 bool needs_pre_barrier = as_normal;
586
    // Flatten the object address if needed.
    // We do this regardless of precise card marking, because we need the
    // address in tmp1 for the barriers either way.
589 if (dst.index() == noreg && dst.disp() == 0) {
590 if (dst.base() != tmp1) {
591 __ movptr(tmp1, dst.base());
592 }
593 } else {
594 __ lea(tmp1, dst);
595 }
596
597 assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);
598
599 if (needs_pre_barrier) {
600 shenandoah_write_barrier_pre(masm /*masm*/,
601 tmp1 /* obj */,
602 tmp2 /* pre_val */,
603 tmp3 /* tmp */,
604 val != noreg /* tosca_live */,
605 false /* expand_call */);
606 }
607
608 BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    if (val != noreg && ShenandoahCardBarrier) {
      store_check(masm, tmp1);
    }
614 } else {
615 BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
616 }
617 }
618
619 void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
620 Register obj, Register tmp, Label& slowpath) {
621 Label done;
622 // Resolve jobject
623 BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
624
625 // Check for null.
626 __ testptr(obj, obj);
627 __ jcc(Assembler::zero, done);
628
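  // We only have the JNIEnv* in hand; it lives at a fixed offset inside
  // JavaThread, so rebase from jni_env to reach the thread's gc_state field.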
629 Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
630 __ testb(gc_state, ShenandoahHeap::EVACUATION);
631 __ jccb(Assembler::notZero, slowpath);
632 __ bind(done);
633 }
634
635 // Special Shenandoah CAS implementation that handles false negatives
636 // due to concurrent evacuation.
637 void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
638 Register res, Address addr, Register oldval, Register newval,
639 bool exchange, Register tmp1, Register tmp2) {
640 assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
641 assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
642 assert_different_registers(oldval, tmp1, tmp2);
643 assert_different_registers(newval, tmp1, tmp2);
644
645 Label L_success, L_failure;
646
647 // Remember oldval for retry logic below
648 if (UseCompressedOops) {
649 __ movl(tmp1, oldval);
650 } else {
651 __ movptr(tmp1, oldval);
652 }
653
654 // Step 1. Fast-path.
655 //
656 // Try to CAS with given arguments. If successful, then we are done.
657
658 if (UseCompressedOops) {
659 __ lock();
660 __ cmpxchgl(newval, addr);
661 } else {
662 __ lock();
663 __ cmpxchgptr(newval, addr);
664 }
665 __ jcc(Assembler::equal, L_success);
666
667 // Step 2. CAS had failed. This may be a false negative.
668 //
669 // The trouble comes when we compare the to-space pointer with the from-space
670 // pointer to the same object. To resolve this, it will suffice to resolve
671 // the value from memory -- this will give both to-space pointers.
672 // If they mismatch, then it was a legitimate failure.
673 //
  // Before reaching for the resolve sequence, see if we can avoid the whole
  // shebang with filters.
676
677 // Filter: when offending in-memory value is null, the failure is definitely legitimate
678 if (UseCompressedOops) {
679 __ testl(oldval, oldval);
680 } else {
681 __ testptr(oldval, oldval);
682 }
683 __ jcc(Assembler::zero, L_failure);
684
685 // Filter: when heap is stable, the failure is definitely legitimate
686 const Register thread = r15_thread;
687 Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
688 __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
689 __ jcc(Assembler::zero, L_failure);
690
  // Decode the offending in-memory value.
  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }

  // Test if the object is forwarded.
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
701 __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
702 __ jcc(Assembler::zero, L_failure); // When it is 00, then also not forwarded
703
704 // Load and mask forwarding pointer
705 __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
706 __ shrptr(tmp2, 2);
707 __ shlptr(tmp2, 2);
708
709 if (UseCompressedOops) {
710 __ decode_heap_oop(tmp1); // decode for comparison
711 }
712
713 // Now we have the forwarded offender in tmp2.
714 // Compare and if they don't match, we have legitimate failure
715 __ cmpptr(tmp1, tmp2);
716 __ jcc(Assembler::notEqual, L_failure);
717
718 // Step 3. Need to fix the memory ptr before continuing.
719 //
720 // At this point, we have from-space oldval in the register, and its to-space
721 // address is in tmp2. Let's try to update it into memory. We don't care if it
722 // succeeds or not. If it does, then the retrying CAS would see it and succeed.
723 // If this fixup fails, this means somebody else beat us to it, and necessarily
724 // with to-space ptr store. We still have to do the retry, because the GC might
725 // have updated the reference for us.
726
  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }
738
739 // Step 4. Try to CAS again.
740 //
741 // This is guaranteed not to have false negatives, because oldval is definitely
742 // to-space, and memory pointer is to-space as well. Nothing is able to store
743 // from-space ptr into memory anymore. Make sure oldval is restored, after being
744 // garbled during retries.
745 //
746 if (UseCompressedOops) {
747 __ movl(oldval, tmp2);
748 } else {
749 __ movptr(oldval, tmp2);
750 }
751
752 if (UseCompressedOops) {
753 __ lock();
754 __ cmpxchgl(newval, addr);
755 } else {
756 __ lock();
757 __ cmpxchgptr(newval, addr);
758 }
759 if (!exchange) {
760 __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
761 }
762
  // Step 5. If we need a boolean result out of CAS, set the flag appropriately,
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.
766
767 if (exchange) {
768 __ bind(L_failure);
769 __ bind(L_success);
770 } else {
771 assert(res != noreg, "need result register");
772
773 Label exit;
774 __ bind(L_failure);
775 __ xorptr(res, res);
776 __ jmpb(exit);
777
778 __ bind(L_success);
779 __ movptr(res, 1);
780 __ bind(exit);
781 }
782 }
783
784 #ifdef COMPILER2
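// C2 version of the load-reference barrier: emit only the gc_state test
// inline and branch to an out-of-line ShenandoahLoadRefBarrierStubC2 for the
// cset check and the runtime call (see emit_code below).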
785 void ShenandoahBarrierSetAssembler::load_ref_barrier_c2(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp1, Register tmp2, Register tmp3, bool narrow) {
786 if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
787 return;
788 }
789 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
790
791 ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp1, tmp2, tmp3, narrow);
792 stub->dont_preserve(obj); // set at the end, no need to save
793 if (tmp1 != noreg) {
794 stub->dont_preserve(tmp1); // temp, no need to save
795 }
796 if (tmp2 != noreg) {
797 stub->dont_preserve(tmp2); // temp, no need to save
798 }
799 if (tmp3 != noreg) {
800 stub->dont_preserve(tmp3); // temp, no need to save
801 }
802
803 Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
804 int flags = ShenandoahHeap::HAS_FORWARDED;
805 bool is_strong = (node->barrier_data() & ShenandoahBarrierStrong) != 0;
806 if (!is_strong) {
807 flags |= ShenandoahHeap::WEAK_ROOTS;
808 }
809 __ testb(gc_state, flags);
810 __ jcc(Assembler::notZero, *stub->entry());
811 __ bind(*stub->continuation());
812 }
813
814 void ShenandoahBarrierSetAssembler::satb_barrier_c2(const MachNode* node, MacroAssembler* masm,
815 Register addr, Register preval, Register tmp) {
816 if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
817 return;
818 }
819 ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, preval, tmp);
820 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
821 Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
822 __ testb(gc_state, ShenandoahHeap::MARKING);
823 __ jcc(Assembler::notZero, *stub->entry());
824 __ bind(*stub->continuation());
825 }
826
827 void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm,
828 Register addr, Register addr_tmp, Register tmp) {
829 if (!ShenandoahCardBarrier ||
830 (node->barrier_data() & (ShenandoahBarrierCardMark | ShenandoahBarrierCardMarkNotNull)) == 0) {
831 return;
832 }
833 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
834 if (addr != noreg) {
835 __ mov(addr_tmp, addr);
836 }
837 __ shrptr(addr_tmp, CardTable::card_shift());
838
839 Address curr_ct_holder_addr(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
840 __ movptr(tmp, curr_ct_holder_addr);
841 Address card_addr(tmp, addr_tmp, Address::times_1);
842
843 int dirty = CardTable::dirty_card_val();
844 if (UseCondCardMark) {
845 Label L_already_dirty;
846 __ cmpb(card_addr, dirty);
847 __ jccb(Assembler::equal, L_already_dirty);
848 __ movb(card_addr, dirty);
849 __ bind(L_already_dirty);
850 } else {
851 __ movb(card_addr, dirty);
852 }
853 }
854
855 void ShenandoahBarrierSetAssembler::cmpxchg_oop_c2(const MachNode* node, MacroAssembler* masm,
856 Register res, Address addr, Register oldval, Register newval, Register tmp1, Register tmp2,
857 bool exchange) {
858 assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
859 assert_different_registers(oldval, tmp1, tmp2);
860 assert_different_registers(newval, tmp1, tmp2);
861
862 // Remember oldval for retry logic in slow path. We need to do it here,
863 // because it will be overwritten by the fast-path CAS.
864 if (ShenandoahCASBarrier) {
865 __ movptr(tmp2, oldval);
866 }
867
868 // Fast-path: Try to CAS optimistically. If successful, then we are done.
869 __ lock();
870 if (UseCompressedOops) {
871 __ cmpxchgl(newval, addr);
872 } else {
873 __ cmpxchgptr(newval, addr);
874 }
875
876 // If we need a boolean result out of CAS, set the flag appropriately and promote the result.
877 // This would be the final result if we do not go slow.
878 if (!exchange) {
879 assert(res != noreg, "need result register");
880 __ setcc(Assembler::equal, res);
881 } else {
882 assert(res == noreg, "no result expected");
883 }
884
885 if (ShenandoahCASBarrier) {
886 ShenandoahCASBarrierSlowStubC2* const slow_stub =
887 ShenandoahCASBarrierSlowStubC2::create(node, addr, oldval, newval, res, tmp1, tmp2, exchange);
888 if (res != noreg) {
889 slow_stub->dont_preserve(res); // set at the end, no need to save
890 }
891 slow_stub->dont_preserve(oldval); // saved explicitly
892 slow_stub->dont_preserve(tmp1); // temp, no need to save
893 slow_stub->dont_preserve(tmp2); // temp, no need to save
894
895 // On success, we do not need any additional handling.
896 __ jccb(Assembler::equal, *slow_stub->continuation());
897
898 // If GC is in progress, it is likely we need additional handling for false negatives.
899 Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
900 __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
901 __ jcc(Assembler::notZero, *slow_stub->entry());
902
903 // Slow stub re-enters with result set correctly.
904 __ bind(*slow_stub->continuation());
905 }
906 }
907
908 #undef __
909 #define __ masm.
910
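// Out-of-line part of the C2 LRB: decode narrow oops, re-check the cset for
// strong accesses, then call the runtime entry matching the barrier strength,
// preserving live registers around the call.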
911 void ShenandoahLoadRefBarrierStubC2::emit_code(MacroAssembler& masm) {
912 Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
913 __ bind(*entry());
914
915 Register obj = _obj;
916 if (_narrow) {
917 __ movl(_tmp1, _obj);
918 __ decode_heap_oop(_tmp1);
919 obj = _tmp1;
920 }
921
922 // Weak/phantom loads always need to go to runtime.
923 if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
924 __ movptr(_tmp2, obj);
925 __ shrptr(_tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
926 __ movptr(_tmp3, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
927 __ movbool(_tmp2, Address(_tmp2, _tmp3, Address::times_1));
928 __ testbool(_tmp2);
929 __ jcc(Assembler::zero, *continuation());
930 }
931
932 {
933 SaveLiveRegisters save_registers(&masm, this);
934 if (c_rarg0 != obj) {
935 if (c_rarg0 == _addr) {
936 __ movptr(_tmp2, _addr);
937 _addr = _tmp2;
938 }
939 __ movptr(c_rarg0, obj);
940 }
941 if (c_rarg1 != _addr) {
942 __ movptr(c_rarg1, _addr);
943 }
944
    address entry = nullptr; // exactly one strength bit is set below; guards against uninitialized use
946 if (_narrow) {
947 if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
948 entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
949 } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
950 entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
951 } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
952 entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
953 }
954 } else {
955 if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
956 entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
957 } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
958 entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
959 } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
960 entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
961 }
962 }
963 __ call(RuntimeAddress(entry), rax);
964 assert(!save_registers.contains(_obj), "must not save result register");
965 __ movptr(_obj, rax);
966 }
967 if (_narrow) {
968 __ encode_heap_oop(_obj);
969 }
970
971 __ jmp(*continuation());
972 }
973
974 void ShenandoahSATBBarrierStubC2::emit_code(MacroAssembler& masm) {
975 __ bind(*entry());
976 Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
977 Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
978
979 Label runtime;
980
981 // Do we need to load the previous value?
982 if (_addr != noreg) {
983 __ load_heap_oop(_preval, Address(_addr, 0), noreg, AS_RAW);
984 }
985 // Is the previous value null?
986 __ cmpptr(_preval, NULL_WORD);
987 __ jcc(Assembler::equal, *continuation());
988
989 // Can we store a value in the given thread's buffer?
990 // (The index field is typed as size_t.)
991 __ movptr(_tmp, index);
992 __ testptr(_tmp, _tmp);
993 __ jccb(Assembler::zero, runtime);
994 // The buffer is not full, store value into it.
995 __ subptr(_tmp, wordSize);
996 __ movptr(index, _tmp);
997 __ addptr(_tmp, buffer);
998 __ movptr(Address(_tmp, 0), _preval);
999
1000 __ jmp(*continuation());
1001
1002 __ bind(runtime);
1003 {
1004 SaveLiveRegisters save_registers(&masm, this);
1005 if (c_rarg0 != _preval) {
1006 __ mov(c_rarg0, _preval);
1007 }
1008 // rax is a caller-saved, non-argument-passing register, so it does not
1009 // interfere with c_rarg0 or c_rarg1. If it contained any live value before
1010 // entering this stub, it is saved at this point, and restored after the
1011 // call. If it did not contain any live value, it is free to be used. In
1012 // either case, it is safe to use it here as a call scratch register.
1013 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
1014 }
1015 __ jmp(*continuation());
1016 }
1017
1018 void ShenandoahCASBarrierMidStubC2::emit_code(MacroAssembler& masm) {
1019 // x86_64 does not implement this.
1020 ShouldNotReachHere();
1021 }
1022
1023 void ShenandoahCASBarrierSlowStubC2::emit_code(MacroAssembler& masm) {
1024 __ bind(*entry());
1025
  // CAS has failed because the value held at addr does not match expected.
  // This may be a false negative, because the version in memory might be
  // the from-space version of the same object we currently hold a to-space
  // reference for.
  //
  // To resolve this, we need to pass the location through the LRB fixup;
  // this will make sure that the location holds only to-space pointers.
  // To avoid calling into the runtime too often, we cset-check the object first.
  // We could inline most of the work here, but there is little point,
  // as CAS failures over cset locations must be rare. This fast-slow split
  // matches what we do for the normal LRB.
1037
1038 assert(_expected == rax, "expected must be rax");
1039
1040 // Non-strong references should always go to runtime. We do not expect
1041 // CASes over non-strong locations.
1042 assert((_node->barrier_data() & ShenandoahBarrierStrong) != 0, "Only strong references for CASes");
1043
1044 Label L_final;
1045
  // The fast path stashed the original oldval in tmp2 for us. Save it on the
  // stack for the final retry; this frees up tmp2 for the cset check below.
1048 __ push(_tmp2);
1049
1050 // (Compressed) failure witness is in _expected.
1051 // Unpack it and check if it is in collection set.
1052 __ movptr(_tmp1, _expected);
1053 if (UseCompressedOops) {
1054 __ decode_heap_oop(_tmp1);
1055 }
1056 __ shrptr(_tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
1057 __ movptr(_tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
1058 __ movbool(_tmp1, Address(_tmp1, _tmp2, Address::times_1));
1059 __ testbool(_tmp1);
1060 __ jcc(Assembler::zero, L_final);
1061
1062 {
1063 SaveLiveRegisters save_registers(&masm, this);
1064 // Load up failure witness again.
1065 if (c_rarg0 != _expected) {
1066 __ movptr(c_rarg0, _expected);
1067 }
1068 if (UseCompressedOops) {
1069 __ decode_heap_oop(c_rarg0);
1070 }
1071 __ lea(c_rarg1, _addr);
1072
1073 if (UseCompressedOops) {
1074 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), 2);
1075 } else {
1076 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), 2);
1077 }
1078 // We have called LRB to fix up the heap location. We do not care about its result,
1079 // as we will just try to CAS the location again.
1080 }
1081
1082 __ bind(L_final);
1083
1084 // Try to CAS again with the original expected value.
1085 // At this point, there can no longer be false negatives.
1086 __ pop(_expected);
1087 __ lock();
1088 if (UseCompressedOops) {
1089 __ cmpxchgl(_new_val, _addr);
1090 } else {
1091 __ cmpxchgptr(_new_val, _addr);
1092 }
1093 if (!_cae) {
1094 assert(_result != noreg, "need result register");
1095 __ setcc(Assembler::equal, _result);
1096 } else {
1097 assert(_result == noreg, "no result expected");
1098 }
1099 __ jmp(*continuation());
1100 }
1101
1102 #undef __
1103 #define __ masm->
1104 #endif
1105
1106 #ifdef PRODUCT
1107 #define BLOCK_COMMENT(str) /* nothing */
1108 #else
1109 #define BLOCK_COMMENT(str) __ block_comment(str)
1110 #endif
1111
1112 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
1113
1114 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
1115
1116 void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
1117 Register addr, Register count,
1118 Register tmp) {
1119 assert(ShenandoahCardBarrier, "Should have been checked by caller");
1120
1121 Label L_loop, L_done;
1122 const Register end = count;
1123 assert_different_registers(addr, end);
1124
1125 // Zero count? Nothing to do.
1126 __ testl(count, count);
1127 __ jccb(Assembler::zero, L_done);
1128
1129 const Register thread = r15_thread;
1130 Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
1131 __ movptr(tmp, curr_ct_holder_addr);
1132
1133 __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
1134 __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
1135 __ shrptr(addr, CardTable::card_shift());
1136 __ shrptr(end, CardTable::card_shift());
1137 __ subptr(end, addr); // end --> cards count
1138
1139 __ addptr(addr, tmp);
1140
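  // count now holds (last_card - first_card); walk it down to zero, dirtying
  // each card in the inclusive range. CardTable::dirty_card_val() is 0,
  // hence the literal 0 stored below.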
1141 __ BIND(L_loop);
1142 __ movb(Address(addr, count, Address::times_1), 0);
1143 __ decrement(count);
1144 __ jccb(Assembler::greaterEqual, L_loop);
1145
1146 __ BIND(L_done);
1147 }
1148
1149 #undef __
1150
1151 #ifdef COMPILER1
1152
1153 #define __ ce->masm()->
1154
1155 void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
1156 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
1157 // At this point we know that marking is in progress.
1158 // If do_load() is true then we have to emit the
1159 // load of the previous value; otherwise it has already
1160 // been loaded into _pre_val.
1161
1162 __ bind(*stub->entry());
1163 assert(stub->pre_val()->is_register(), "Precondition.");
1164
1165 Register pre_val_reg = stub->pre_val()->as_register();
1166
1167 if (stub->do_load()) {
1168 ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
1169 }
1170
1171 __ cmpptr(pre_val_reg, NULL_WORD);
1172 __ jcc(Assembler::equal, *stub->continuation());
1173 ce->store_parameter(stub->pre_val()->as_register(), 0);
1174 __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
1175 __ jmp(*stub->continuation());
}
1178
1179 void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
1180 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
1181 __ bind(*stub->entry());
1182
1183 DecoratorSet decorators = stub->decorators();
1184 bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
1185 bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
1186 bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
1187 bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
1188
1189 Register obj = stub->obj()->as_register();
1190 Register res = stub->result()->as_register();
1191 Register addr = stub->addr()->as_pointer_register();
1192 Register tmp1 = stub->tmp1()->as_register();
1193 Register tmp2 = stub->tmp2()->as_register();
1194 assert_different_registers(obj, res, addr, tmp1, tmp2);
1195
1196 Label slow_path;
1197
1198 assert(res == rax, "result must arrive in rax");
1199
1200 if (res != obj) {
1201 __ mov(res, obj);
1202 }
1203
1204 if (is_strong) {
1205 // Check for object being in the collection set.
1206 __ mov(tmp1, res);
1207 __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
1208 __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
1209 __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
1210 __ testbool(tmp2);
1211 __ jcc(Assembler::zero, *stub->continuation());
1212 }
1213
1214 __ bind(slow_path);
1215 ce->store_parameter(res, 0);
1216 ce->store_parameter(addr, 1);
1217 if (is_strong) {
1218 if (is_native) {
1219 __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
1220 } else {
1221 __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
1222 }
1223 } else if (is_weak) {
1224 __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
1225 } else {
1226 assert(is_phantom, "only remaining strength");
1227 __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
1228 }
1229 __ jmp(*stub->continuation());
1230 }
1231
1232 #undef __
1233
1234 #define __ sasm->
1235
1236 void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
1237 __ prologue("shenandoah_pre_barrier", false);
1238 // arg0 : previous value of memory
1239
1240 __ push(rax);
1241 __ push(rdx);
1242
1243 const Register pre_val = rax;
1244 const Register thread = r15_thread;
1245 const Register tmp = rdx;
1246
1247 Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
1248 Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
1249
1250 Label done;
1251 Label runtime;
1252
1253 // Is SATB still active?
1254 Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
1255 __ testb(gc_state, ShenandoahHeap::MARKING);
1256 __ jcc(Assembler::zero, done);
1257
1258 // Can we store original value in the thread's buffer?
1259
1260 __ movptr(tmp, queue_index);
1261 __ testptr(tmp, tmp);
1262 __ jcc(Assembler::zero, runtime);
1263 __ subptr(tmp, wordSize);
1264 __ movptr(queue_index, tmp);
1265 __ addptr(tmp, buffer);
1266
1267 // prev_val (rax)
1268 __ load_parameter(0, pre_val);
1269 __ movptr(Address(tmp, 0), pre_val);
1270 __ jmp(done);
1271
1272 __ bind(runtime);
1273
1274 __ save_live_registers_no_oop_map(true);
1275
1276 // load the pre-value
1277 __ load_parameter(0, rcx);
1278 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), rcx);
1279
1280 __ restore_live_registers(true);
1281
1282 __ bind(done);
1283
1284 __ pop(rdx);
1285 __ pop(rax);
1286
1287 __ epilogue();
1288 }
1289
1290 void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
1291 __ prologue("shenandoah_load_reference_barrier", false);
1292 // arg0 : object to be resolved
1293
1294 __ save_live_registers_no_oop_map(true);
1295
1296 bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
1297 bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
1298 bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
1299 bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
1300
1301 __ load_parameter(0, c_rarg0);
1302 __ load_parameter(1, c_rarg1);
1303 if (is_strong) {
1304 if (is_native) {
1305 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
1306 } else {
1307 if (UseCompressedOops) {
1308 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
1309 } else {
1310 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
1311 }
1312 }
1313 } else if (is_weak) {
1314 assert(!is_native, "weak must not be called off-heap");
1315 if (UseCompressedOops) {
1316 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
1317 } else {
1318 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
1319 }
1320 } else {
1321 assert(is_phantom, "only remaining strength");
1322 assert(is_native, "phantom must only be called off-heap");
1323 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
1324 }
1325
1326 __ restore_live_registers_except_rax(true);
1327
1328 __ epilogue();
1329 }
1330
1331 #undef __
1332
1333 #endif // COMPILER1