/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

#define __ masm->

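// Helpers that bracket out-of-line runtime calls emitted below: they save and
// restore the machine state (caller-saved GPRs and, where requested, the XMM
// registers that carry Java floating-point arguments) around the call.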
static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
    assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call");

    const int xmm_size = wordSize * 2;
    __ subptr(rsp, xmm_size * 8);
    __ movdbl(Address(rsp, xmm_size * 0), xmm0);
    __ movdbl(Address(rsp, xmm_size * 1), xmm1);
    __ movdbl(Address(rsp, xmm_size * 2), xmm2);
    __ movdbl(Address(rsp, xmm_size * 3), xmm3);
    __ movdbl(Address(rsp, xmm_size * 4), xmm4);
    __ movdbl(Address(rsp, xmm_size * 5), xmm5);
    __ movdbl(Address(rsp, xmm_size * 6), xmm6);
    __ movdbl(Address(rsp, xmm_size * 7), xmm7);
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    const int xmm_size = wordSize * 2;
    __ movdbl(xmm0, Address(rsp, xmm_size * 0));
    __ movdbl(xmm1, Address(rsp, xmm_size * 1));
    __ movdbl(xmm2, Address(rsp, xmm_size * 2));
    __ movdbl(xmm3, Address(rsp, xmm_size * 3));
    __ movdbl(xmm4, Address(rsp, xmm_size * 4));
    __ movdbl(xmm5, Address(rsp, xmm_size * 5));
    __ movdbl(xmm6, Address(rsp, xmm_size * 6));
    __ movdbl(xmm7, Address(rsp, xmm_size * 7));
    __ addptr(rsp, xmm_size * 8);
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {
    if (ShenandoahCardBarrier) {
      bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
      bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
      bool obj_int = (type == T_OBJECT) && UseCompressedOops;

      // We need to save the original element count because the array copy stub
      // will destroy the value and we need it for the card marking barrier.
      if (!checkcast) {
        if (!obj_int) {
          // Save count for barrier
          __ movptr(r11, count);
        } else if (disjoint) {
          // Save dst in r11 in the disjoint case
          __ movq(r11, dst);
        }
      }
    }

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
      Register thread = r15_thread;
      assert_different_registers(src, dst, count, thread);

      Label L_done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, L_done);

      // Avoid runtime call when not active.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
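      // An uninitialized destination holds no previous values to log, so the
      // SATB (MARKING) filter can be dropped there; the evacuation fixup is
      // still needed whenever HAS_FORWARDED is set.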
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, L_done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(L_done);
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  if (ShenandoahCardBarrier && is_reference_type(type)) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
    bool obj_int = (type == T_OBJECT) && UseCompressedOops;
    Register tmp = rax;

    if (!checkcast) {
      if (!obj_int) {
        // Use the count saved by the prologue
        count = r11;
      } else if (disjoint) {
        // Use the dst saved by the prologue in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro directly,
  // skipping the _last_sp check generated by
  // InterpreterMacroAssembler::call_VM_leaf_base.

  const Register thread = r15_thread;

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);                // tmp := *index_adr
  __ cmpptr(tmp, 0);                    // tmp == 0?
  __ jcc(Assembler::equal, runtime);    // If yes, goto runtime

  __ subptr(tmp, wordSize);             // tmp := tmp - wordSize
  __ movptr(index, tmp);                // *index_adr := tmp
  __ addptr(tmp, buffer);               // tmp := tmp + *buffer_adr
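  // tmp now points at the newly claimed slot: buffer base plus the
  // decremented index.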

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) {
    __ push(rax);
  }

  if (obj != noreg && obj != rax) {
    __ push(obj);
  }

  if (pre_val != rax) {
    __ push(pre_val);
  }

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.

  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy the thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), 1);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
  }

  // restore the live input values
  if (pre_val != rax) {
    __ pop(pre_val);
  }

  if (obj != noreg && obj != rax) {
    __ pop(obj);
  }

  if (tosca_live) {
    __ pop(rax);
  }

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
  Register thread = r15_thread;

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
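    // The collection set membership table is a byte map indexed by region
    // number: shifting the address right by the region size shift yields the
    // region index, and a single byte load answers the cset question.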
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // The FP state is handled above; the GPRs that may be live are saved manually below.

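  // Slot breakdown: rcx, rdx, rdi, rsi (4), rax when it is not the
  // destination (0 or 1), r8-r11 (4), and r16-r31 (16) when APX is enabled.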
  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // Save APX extended registers r16-r31 if enabled
  if (UseAPX) {
    __ movptr(Address(rsp, (--slot) * wordSize), r16);
    __ movptr(Address(rsp, (--slot) * wordSize), r17);
    __ movptr(Address(rsp, (--slot) * wordSize), r18);
    __ movptr(Address(rsp, (--slot) * wordSize), r19);
    __ movptr(Address(rsp, (--slot) * wordSize), r20);
    __ movptr(Address(rsp, (--slot) * wordSize), r21);
    __ movptr(Address(rsp, (--slot) * wordSize), r22);
    __ movptr(Address(rsp, (--slot) * wordSize), r23);
    __ movptr(Address(rsp, (--slot) * wordSize), r24);
    __ movptr(Address(rsp, (--slot) * wordSize), r25);
    __ movptr(Address(rsp, (--slot) * wordSize), r26);
    __ movptr(Address(rsp, (--slot) * wordSize), r27);
    __ movptr(Address(rsp, (--slot) * wordSize), r28);
    __ movptr(Address(rsp, (--slot) * wordSize), r29);
    __ movptr(Address(rsp, (--slot) * wordSize), r30);
    __ movptr(Address(rsp, (--slot) * wordSize), r31);
  }
  // r12-r15 are callee saved in all calling conventions
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
  Register arg0 = c_rarg0, arg1 = c_rarg1;
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

  // Restore APX extended registers r31-r16 if previously saved
  if (UseAPX) {
    __ movptr(r31, Address(rsp, (slot++) * wordSize));
    __ movptr(r30, Address(rsp, (slot++) * wordSize));
    __ movptr(r29, Address(rsp, (slot++) * wordSize));
    __ movptr(r28, Address(rsp, (slot++) * wordSize));
    __ movptr(r27, Address(rsp, (slot++) * wordSize));
    __ movptr(r26, Address(rsp, (slot++) * wordSize));
    __ movptr(r25, Address(rsp, (slot++) * wordSize));
    __ movptr(r24, Address(rsp, (slot++) * wordSize));
    __ movptr(r23, Address(rsp, (slot++) * wordSize));
    __ movptr(r22, Address(rsp, (slot++) * wordSize));
    __ movptr(r21, Address(rsp, (slot++) * wordSize));
    __ movptr(r20, Address(rsp, (slot++) * wordSize));
    __ movptr(r19, Address(rsp, (slot++) * wordSize));
    __ movptr(r18, Address(rsp, (slot++) * wordSize));
    __ movptr(r17, Address(rsp, (slot++) * wordSize));
    __ movptr(r16, Address(rsp, (slot++) * wordSize));
  }
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9,  Address(rsp, (slot++) * wordSize));
  __ movptr(r8,  Address(rsp, (slot++) * wordSize));
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");
}

//
// Arguments:
//
// Inputs:
//   src:  oop location, might be clobbered
//   tmp1: scratch register, might not be valid.
//
// Output:
//   dst:  oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    assert_different_registers(dst, tmp1, r15_thread);
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    shenandoah_write_barrier_pre(masm /* masm */,
                                 noreg /* obj */,
                                 dst /* pre_val */,
                                 tmp1 /* tmp */,
                                 true /* tosca_live */,
                                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  __ shrptr(obj, CardTable::card_shift());

  // thread serves as the TLS base address; tmp will hold the card table base.
  Register thread = r15_thread;
  Register tmp = rscratch1;

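  // The card table base is reloaded from thread-local storage rather than
  // embedded as a constant, presumably because the table currently in use
  // can change at runtime (the thread-local slot names the current holder).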
  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, obj, Address::times_1);

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool on_oop = is_reference_type(type);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    // Flatten the object address if needed.
    // We do it regardless of precise because we need the registers.
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

    assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /* masm */,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }

    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    if (val != noreg) {
      if (ShenandoahCardBarrier) {
        store_check(masm, tmp1);
      }
    }
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

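  // jni_env points at the thread's JNIEnv, so subtracting its offset within
  // JavaThread rebases the address to reach the thread-local gc_state.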
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with the given arguments. If successful, then we are done.

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS had failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  // Before resorting to the resolve sequence, see if we can avoid it
  // altogether with cheap filters.

  // Filter: when the offending in-memory value is null, the failure is definitely legitimate
  if (UseCompressedOops) {
    __ testl(oldval, oldval);
  } else {
    __ testptr(oldval, oldval);
  }
  __ jcc(Assembler::zero, L_failure);

  // Filter: when the heap is stable, the failure is definitely legitimate
  const Register thread = r15_thread;
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

  // Decode the offending in-memory value.
  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }

  // Test if it is forwarded (lowest two mark bits == 11).
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);     // When it is 00, then also not forwarded

  // Load and mask the forwarding pointer
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);
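  // (Shifting right and back left by 2 clears the two low lock bits of the
  // mark word, leaving the raw to-space address of the forwardee.)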

  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }

  // Now we have the forwarded offender in tmp2.
  // Compare and if they don't match, we have a legitimate failure
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have the from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with a to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.

  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and the memory pointer is to-space as well. Nothing is able to store
  // a from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else {
    __ movptr(oldval, tmp2);
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }

  // Step 5. If we need a boolean result out of the CAS, set the flag appropriately
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::load_ref_barrier_c2(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp1, Register tmp2, Register tmp3, bool narrow) {
  if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

  ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp1, tmp2, tmp3, narrow);
  stub->dont_preserve(obj);
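  // The stub writes its result into obj, so there is no point in preserving
  // obj's old value across the runtime call (see the matching assert in
  // ShenandoahLoadRefBarrierStubC2::emit_code).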

  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  bool is_strong = (node->barrier_data() & ShenandoahBarrierStrong) != 0;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::notZero, *stub->entry());
  __ bind(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::satb_barrier_c2(const MachNode* node, MacroAssembler* masm,
                                                    Register addr, Register preval, Register tmp) {
  if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
    return;
  }
  ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, preval, tmp);
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::notZero, *stub->entry());
  __ bind(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm,
                                                    Register addr, Register addr_tmp, Register tmp) {
  if (!ShenandoahCardBarrier ||
      (node->barrier_data() & (ShenandoahBarrierCardMark | ShenandoahBarrierCardMarkNotNull)) == 0) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  if (addr != noreg) {
    __ mov(addr_tmp, addr);
  }
  __ shrptr(addr_tmp, CardTable::card_shift());

  Address curr_ct_holder_addr(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, addr_tmp, Address::times_1);

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}
void ShenandoahBarrierSetAssembler::cmpxchg_oop_c2(const MachNode* node, MacroAssembler* masm,
                                                   Register res, Address addr, Register oldval, Register newval, Register tmp1, Register tmp2,
                                                   bool exchange) {
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  ShenandoahCASBarrierSlowStubC2* const slow_stub = ShenandoahCASBarrierSlowStubC2::create(node, addr, oldval, newval, res, tmp1, tmp2, exchange);
  ShenandoahCASBarrierMidStubC2* const mid_stub = ShenandoahCASBarrierMidStubC2::create(node, slow_stub, oldval, res, tmp1, exchange);
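  // Out-of-line structure: the mid-stub applies the cheap filters (null
  // fetched value, stable heap) and the slow-stub runs the full
  // resolve-forwardee-and-retry sequence.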

  // Remember oldval for the retry logic below. It will be overwritten by the CAS.
  if (ShenandoahCASBarrier) {
    __ movptr(tmp2, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with the given arguments. If successful, then we are done.
  __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(newval, addr);
  } else {
    __ cmpxchgptr(newval, addr);
  }

  if (!ShenandoahCASBarrier) {
    if (!exchange) {
      assert(res != noreg, "need result register");
      __ setcc(Assembler::equal, res);
    }
    return;
  }

  __ jcc(Assembler::notEqual, *mid_stub->entry());

  // The slow-stub re-enters with the condition flags set by its CAS;
  // we may need to set the result accordingly.
  __ bind(*slow_stub->continuation());

  // Step 5. If we need a boolean result out of the CAS, set the flag appropriately
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (!exchange) {
    assert(res != noreg, "need result register");
    __ setcc(Assembler::equal, res);
  }

  // The mid-stub re-enters with the result already set correctly.
  __ bind(*mid_stub->continuation());
}

#undef __
#define __ masm.

void ShenandoahLoadRefBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());

  Register obj = _obj;
  if (_narrow) {
    __ movl(_tmp1, _obj);
    __ decode_heap_oop(_tmp1);
    obj = _tmp1;
  }

  // Weak/phantom loads always need to go to the runtime.
  if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
    __ movptr(_tmp2, obj);
    __ shrptr(_tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(_tmp3, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(_tmp2, Address(_tmp2, _tmp3, Address::times_1));
    __ testbool(_tmp2);
    __ jcc(Assembler::zero, *continuation());
  }

  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != obj) {
      if (c_rarg0 == _addr) {
        __ movptr(_tmp2, _addr);
        _addr = _tmp2;
      }
      __ movptr(c_rarg0, obj);
    }
    __ movptr(c_rarg1, _addr);

    address entry = nullptr;
    if (_narrow) {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
      }
    } else {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
      }
    }
    assert(entry != nullptr, "must have one of strong/weak/phantom barrier strength");
    __ call(RuntimeAddress(entry), rax);
    assert(!save_registers.contains(_obj), "must not save result register");
    __ movptr(_obj, rax);
  }
  if (_narrow) {
    __ encode_heap_oop(_obj);
  }

  __ jmp(*continuation());
}

void ShenandoahSATBBarrierStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());
  Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label runtime;

  // Do we need to load the previous value?
  if (_addr != noreg) {
    __ load_heap_oop(_preval, Address(_addr, 0), noreg, AS_RAW);
  }
  // Is the previous value null?
  __ cmpptr(_preval, NULL_WORD);
  __ jcc(Assembler::equal, *continuation());

  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  __ movptr(_tmp, index);
  __ testptr(_tmp, _tmp);
  __ jccb(Assembler::zero, runtime);
  // The buffer is not full, store the value into it.
  __ subptr(_tmp, wordSize);
  __ movptr(index, _tmp);
  __ addptr(_tmp, buffer);
  __ movptr(Address(_tmp, 0), _preval);

  __ jmp(*continuation());

  __ bind(runtime);
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != _preval) {
      __ mov(c_rarg0, _preval);
    }
    // rax is a caller-saved, non-argument-passing register, so it does not
    // interfere with c_rarg0 or c_rarg1. If it contained any live value before
    // entering this stub, it is saved at this point, and restored after the
    // call. If it did not contain any live value, it is free to be used. In
    // either case, it is safe to use it here as a call scratch register.
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
  }
  __ jmp(*continuation());
}

void ShenandoahCASBarrierMidStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  if (!_cae) {
    // Set the result to false, in case we fail the following tests.
    // Failing those tests means a legitimate failure.
    // Otherwise, the result will be set correctly after returning from
    // the slow-path.
    if (UseCompressedOops) {
      __ movl(_result, 0);
    } else {
      __ movptr(_result, 0);
    }
  }
  // Check if the CAS result is null. If it is, then we must have a legitimate failure.
  // This makes loading the fwdptr in the slow-path simpler.
  if (UseCompressedOops) {
    __ testl(_expected, _expected);
  } else {
    __ testptr(_expected, _expected);
  }
  __ jcc(Assembler::equal, *continuation());

  // Check if GC is in progress, otherwise we must have a legitimate failure.
  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::notZero, *_slow_stub->entry());
  __ jmp(*continuation());
}

void ShenandoahCASBarrierSlowStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  assert(_expected == rax, "expected must be rax");

  // Step 2. CAS has failed because the value held at addr does not
  // match expected. This may be a false negative because the value fetched
  // from addr (now held in expected/rax) may be a from-space pointer to the
  // original copy of the same object referenced by the to-space pointer expected.
  //
  // To resolve this, it suffices to find the forward pointer associated
  // with the fetched value. If this matches expected, retry the CAS with new
  // parameters. If this mismatches, then we have a legitimate
  // failure, and we're done.

  // Overwrite tmp1 with the from-space pointer fetched from memory
  __ movptr(_tmp1, _expected);

  if (UseCompressedOops) {
    __ decode_heap_oop_not_null(_tmp1);
  }

  // Load/decode the forwarding pointer.
  __ movq(_tmp1, Address(_tmp1, oopDesc::mark_offset_in_bytes()));
  // Invert the mark-word. This allows us to test the lowest 2 bits easily while preserving the upper bits.
  __ notq(_tmp1);
  __ testq(_tmp1, markWord::lock_mask_in_place);
  // Not forwarded, must have a legit CAS failure.
  __ jcc(Assembler::notEqual, *continuation());
  // Set the lowest two bits. This is equivalent to clearing the two bits after
  // the subsequent inversion.
  __ orq(_tmp1, markWord::marked_value);
  // And invert back to get the forwardee.
  __ notq(_tmp1);
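  // Worked example: a forwarded mark word ends in 0b11, so its inversion ends
  // in 0b00 and passes the test above; or-ing 0b11 into the inverted value and
  // inverting again yields mark & ~0b11, the clean forwardee address.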

  if (UseCompressedOops) {
    __ encode_heap_oop_not_null(_tmp1); // encode for comparison
  }

  // Now we have the forwarded offender in tmp1.
  // We preserved the original expected value in tmp2 in the fast-path.
  // Compare and if they don't match, we have a legitimate failure
  __ cmpptr(_tmp1, _tmp2);
  __ jcc(Assembler::notEqual, *continuation());

  // Fall through to step 3.

  // Step 3. We've confirmed that the value originally held in memory
  // (now held in expected) pointed to the from-space version of the original
  // expected value. Try the CAS again with the from-space expected
  // value. If it now succeeds, we're good.
  //
  // Note: expected holds the encoded from-space pointer whose forwardee,
  // now in tmp1, matches the original to-space expected value.
  __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(_new_val, _addr);
  } else {
    __ cmpxchgptr(_new_val, _addr);
  }

  // If the fetched value did not equal the new expected value, this could
  // still be a false negative, because some other (GC) thread may have newly
  // overwritten the memory value with its to-space equivalent.
  __ jcc(Assembler::equal, *continuation());

  // Step 4. Try to CAS again, but with the original to-space expected.
  // This should be very rare.
  __ movptr(_expected, _tmp2);
  __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(_new_val, _addr);
  } else {
    __ cmpxchgptr(_new_val, _addr);
  }
  // At this point, there can no longer be false negatives.
  __ jmp(*continuation());
}

#undef __
#define __ masm->
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count,
                                                                     Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  Label L_loop, L_done;
  const Register end = count;
  assert_different_registers(addr, end);

  // Zero count? Nothing to do.
  __ testl(count, count);
  __ jccb(Assembler::zero, L_done);

  const Register thread = r15_thread;
  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);

  __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
  __ subptr(end, BytesPerHeapOop);                  // end - 1 to make inclusive
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> cards count

  __ addptr(addr, tmp);
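  // addr now points at the first card for the copied range, and end (aliased
  // to count) holds the index of the last card relative to it, so the loop
  // below dirties cards addr[0] through addr[count] inclusive.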

  __ BIND(L_loop);
  __ movb(Address(addr, count, Address::times_1), 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);

  __ BIND(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj  = stub->obj()->as_register();
  Register res  = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object being in the collection set.
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = r15_thread;
  const Register tmp = rdx;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store the original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), rcx);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1