/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

#define __ masm->
static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
    assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call");

    const int xmm_size = wordSize * 2;
    __ subptr(rsp, xmm_size * 8);
    __ movdbl(Address(rsp, xmm_size * 0), xmm0);
    __ movdbl(Address(rsp, xmm_size * 1), xmm1);
    __ movdbl(Address(rsp, xmm_size * 2), xmm2);
    __ movdbl(Address(rsp, xmm_size * 3), xmm3);
    __ movdbl(Address(rsp, xmm_size * 4), xmm4);
    __ movdbl(Address(rsp, xmm_size * 5), xmm5);
    __ movdbl(Address(rsp, xmm_size * 6), xmm6);
    __ movdbl(Address(rsp, xmm_size * 7), xmm7);
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    const int xmm_size = wordSize * 2;
    __ movdbl(xmm0, Address(rsp, xmm_size * 0));
    __ movdbl(xmm1, Address(rsp, xmm_size * 1));
    __ movdbl(xmm2, Address(rsp, xmm_size * 2));
    __ movdbl(xmm3, Address(rsp, xmm_size * 3));
    __ movdbl(xmm4, Address(rsp, xmm_size * 4));
    __ movdbl(xmm5, Address(rsp, xmm_size * 5));
    __ movdbl(xmm6, Address(rsp, xmm_size * 6));
    __ movdbl(xmm7, Address(rsp, xmm_size * 7));
    __ addptr(rsp, xmm_size * 8);
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {
    if (ShenandoahCardBarrier) {
      bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
      bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
      bool obj_int = (type == T_OBJECT) && UseCompressedOops;

      // We need to save the original element count because the array copy stub
      // will destroy the value and we need it for the card marking barrier.
      if (!checkcast) {
        if (!obj_int) {
          // Save count for barrier
          __ movptr(r11, count);
        } else if (disjoint) {
          // Save dst in r11 in the disjoint case
          __ movq(r11, dst);
        }
      }
    }

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
      Register thread = r15_thread;
      assert_different_registers(src, dst, count, thread);

      Label L_done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, L_done);

      // Avoid runtime call when not active.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
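      // When the destination is uninitialized there are no previous values to
      // log for SATB, so only evacuation (HAS_FORWARDED) matters; otherwise the
      // barrier must also fire while marking is in progress.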
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, L_done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop),
                        src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(L_done);
    }
  }
}
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  if (ShenandoahCardBarrier && is_reference_type(type)) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
    bool obj_int = (type == T_OBJECT) && UseCompressedOops;
    Register tmp = rax;

    if (!checkcast) {
      if (!obj_int) {
        // Reload the count saved in r11 by the prologue
        count = r11;
      } else if (disjoint) {
        // Use the saved dst in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, tmp, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  const Register thread = r15_thread;

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
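  //
  // Roughly, the fast path below does (illustrative C-style sketch, not
  // actual HotSpot code):
  //
  //   if (index == 0) goto runtime;   // buffer is full
  //   index -= wordSize;
  //   *(buffer + index) = pre_val;    // record the previous value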

  __ movptr(tmp, index);             // tmp := *index_adr
  __ cmpptr(tmp, 0);                 // tmp == 0?
  __ jcc(Assembler::equal, runtime); // If yes, goto runtime

  __ subptr(tmp, wordSize);          // tmp := tmp - wordSize
  __ movptr(index, tmp);             // *index_adr := tmp
  __ addptr(tmp, buffer);            // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(ebp + frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy the thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), 1);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), c_rarg0);
  }

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow = UseCompressedOops && !is_native;
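  // Note: native (off-heap) accesses always operate on uncompressed oops, even
  // when UseCompressedOops is enabled, hence the !is_native term above.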

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
  Register thread = r15_thread;

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
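    // The in-cset fast-test table has one byte per heap region: shifting the
    // object address right by the region size shift yields the region index,
    // and a non-zero byte at that index means the region is in the collection set.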
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // The rest is saved with the optimized path

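  // Slot budget below: rcx, rdx, rdi, rsi (4) + rax when it is not the
  // destination (0 or 1) + r8..r11 (4) + r16..r31 (16) when APX is enabled.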
  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // Save APX extended registers r16-r31 if enabled
  if (UseAPX) {
    __ movptr(Address(rsp, (--slot) * wordSize), r16);
    __ movptr(Address(rsp, (--slot) * wordSize), r17);
    __ movptr(Address(rsp, (--slot) * wordSize), r18);
    __ movptr(Address(rsp, (--slot) * wordSize), r19);
    __ movptr(Address(rsp, (--slot) * wordSize), r20);
    __ movptr(Address(rsp, (--slot) * wordSize), r21);
    __ movptr(Address(rsp, (--slot) * wordSize), r22);
    __ movptr(Address(rsp, (--slot) * wordSize), r23);
    __ movptr(Address(rsp, (--slot) * wordSize), r24);
    __ movptr(Address(rsp, (--slot) * wordSize), r25);
    __ movptr(Address(rsp, (--slot) * wordSize), r26);
    __ movptr(Address(rsp, (--slot) * wordSize), r27);
    __ movptr(Address(rsp, (--slot) * wordSize), r28);
    __ movptr(Address(rsp, (--slot) * wordSize), r29);
    __ movptr(Address(rsp, (--slot) * wordSize), r30);
    __ movptr(Address(rsp, (--slot) * wordSize), r31);
  }
  // r12-r15 are callee saved in all calling conventions
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
  Register arg0 = c_rarg0, arg1 = c_rarg1;
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

  // Restore APX extended registers r31-r16 if previously saved
  if (UseAPX) {
    __ movptr(r31, Address(rsp, (slot++) * wordSize));
    __ movptr(r30, Address(rsp, (slot++) * wordSize));
    __ movptr(r29, Address(rsp, (slot++) * wordSize));
    __ movptr(r28, Address(rsp, (slot++) * wordSize));
    __ movptr(r27, Address(rsp, (slot++) * wordSize));
    __ movptr(r26, Address(rsp, (slot++) * wordSize));
    __ movptr(r25, Address(rsp, (slot++) * wordSize));
    __ movptr(r24, Address(rsp, (slot++) * wordSize));
    __ movptr(r23, Address(rsp, (slot++) * wordSize));
    __ movptr(r22, Address(rsp, (slot++) * wordSize));
    __ movptr(r21, Address(rsp, (slot++) * wordSize));
    __ movptr(r20, Address(rsp, (slot++) * wordSize));
    __ movptr(r19, Address(rsp, (slot++) * wordSize));
    __ movptr(r18, Address(rsp, (slot++) * wordSize));
    __ movptr(r17, Address(rsp, (slot++) * wordSize));
    __ movptr(r16, Address(rsp, (slot++) * wordSize));
  }
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9, Address(rsp, (slot++) * wordSize));
  __ movptr(r8, Address(rsp, (slot++) * wordSize));
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");
}

//
// Arguments:
//
// Inputs:
//   src:  oop location, might be clobbered
//   tmp1: scratch register, might not be valid.
//
// Output:
//   dst:  oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    assert_different_registers(dst, tmp1, r15_thread);
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    shenandoah_write_barrier_pre(masm /* masm */,
                                 noreg /* obj */,
                                 dst /* pre_val */,
                                 tmp1 /* tmp */,
                                 true /* tosca_live */,
                                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  __ shrptr(obj, CardTable::card_shift());

  // We'll use this register as the TLS base address and also later on
  // to hold the byte_map_base.
  Register thread = r15_thread;
  Register tmp = rscratch1;

  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, obj, Address::times_1);

  int dirty = CardTable::dirty_card_val();
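  // With UseCondCardMark, test before writing: skipping the store when the card
  // is already dirty avoids redundant writes to the same cache line and the
  // coherence traffic they would cause.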
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool on_oop = is_reference_type(type);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    // Flatten the object address if needed.
    // We do this regardless of precision because we need the register anyway.
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

    assert_different_registers(val, tmp1, tmp2, tmp3, r15_thread);

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /*masm*/,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }

    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    if (val != noreg) {
      if (ShenandoahCardBarrier) {
        store_check(masm, tmp1);
      }
    }
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

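  // gc_state is addressed relative to jni_env: the JNIEnv is embedded in the
  // JavaThread, so subtracting jni_environment_offset() rebases the offset from
  // the thread to the register we actually have in hand.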
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS had failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  // Before reaching the resolve sequence, see if we can avoid the whole shebang
  // with filters.
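  //
  // Illustrative sketch of the whole retry logic (C-style pseudocode, not
  // actual HotSpot code; forwardee() stands for the mark-word decoding below):
  //
  //   if (CAS(addr, expected, newval)) return true;       // Step 1
  //   witness = *addr;                                    // left in oldval by the CAS
  //   if (witness == null || heap_stable) return false;   // legitimate failure
  //   if (forwardee(witness) != expected) return false;   // legitimate failure
  //   CAS(addr, witness, expected);                       // Step 3: fix the memory ptr
  //   return CAS(addr, expected, newval);                 // Step 4: retry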

  // Filter: when offending in-memory value is null, the failure is definitely legitimate
  if (UseCompressedOops) {
    __ testl(oldval, oldval);
  } else {
    __ testptr(oldval, oldval);
  }
  __ jcc(Assembler::zero, L_failure);

  // Filter: when heap is stable, the failure is definitely legitimate
  const Register thread = r15_thread;
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else {
    __ movptr(tmp2, oldval);
  }

  // Decode offending in-memory value.
  // Test if-forwarded
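  // testb sets the parity flag from (mark & 3): parity is even for 0b00 and
  // 0b11, and odd for 0b01 and 0b10. An object is forwarded only when both low
  // mark bits are set (0b11), so odd parity or an all-zero result both mean
  // "not forwarded".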
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);     // When it is 00, then also not forwarded

  // Load and mask forwarding pointer
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);
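  // (The shift pair clears the two low tag bits of the mark word, leaving the
  // forwardee address.)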

  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }

  // Now we have the forwarded offender in tmp2.
  // Compare and if they don't match, we have a legitimate failure
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have the from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with a to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.

  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and the memory pointer is to-space as well. Nothing is able to store
  // a from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else {
    __ movptr(oldval, tmp2);
  }

  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }

  // Step 5. If we need a boolean result out of CAS, set the flag appropriately
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#ifdef COMPILER2
void ShenandoahBarrierSetAssembler::load_ref_barrier_c2(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp1, Register tmp2, Register tmp3, bool narrow) {
  if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);

  ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp1, tmp2, tmp3, narrow);
  stub->dont_preserve(obj);

  // Test for null.
  if (narrow) {
    __ testl(obj, obj);
  } else {
    __ testptr(obj, obj);
  }
  __ jccb(Assembler::zero, *stub->continuation());

  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  bool is_strong = (node->barrier_data() & ShenandoahBarrierStrong) != 0;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::notZero, *stub->entry());
  __ bind(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::satb_barrier_c2(const MachNode* node, MacroAssembler* masm,
                                                    Register addr, Register preval, Register tmp) {
  if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
    return;
  }
  ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, preval, tmp);
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::notZero, *stub->entry());
  __ bind(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm,
                                                    Register addr, Register addr_tmp, Register tmp) {
  if (!ShenandoahCardBarrier ||
      (node->barrier_data() & (ShenandoahBarrierCardMark | ShenandoahBarrierCardMarkNotNull)) == 0) {
    return;
  }
  Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
  if (addr != noreg) {
    __ mov(addr_tmp, addr);
  }
  __ shrptr(addr_tmp, CardTable::card_shift());

  Address curr_ct_holder_addr(r15_thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);
  Address card_addr(tmp, addr_tmp, Address::times_1);

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

void ShenandoahBarrierSetAssembler::cmpxchg_oop_c2(const MachNode* node, MacroAssembler* masm,
                                                   Register res, Address addr, Register oldval, Register newval, Register tmp1, Register tmp2,
                                                   bool exchange) {
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

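  // The CAS barrier is split three ways: the fast-path CAS is emitted inline,
  // the mid-stub applies the cheap filters (null witness, stable heap), and the
  // slow-stub does the full forwarding-pointer resolution and retries.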
  ShenandoahCASBarrierSlowStubC2* const slow_stub = ShenandoahCASBarrierSlowStubC2::create(node, addr, oldval, newval, res, tmp1, tmp2, exchange);
  ShenandoahCASBarrierMidStubC2* const mid_stub = ShenandoahCASBarrierMidStubC2::create(node, slow_stub, oldval, res, tmp1, exchange);

  Label L_success, L_failure;

  // Remember oldval for retry logic below. It will be overwritten by the CAS.
  if (ShenandoahCASBarrier) {
    __ movptr(tmp2, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.
  __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(newval, addr);
  } else {
    __ cmpxchgptr(newval, addr);
  }

  if (!ShenandoahCASBarrier) {
    if (!exchange) {
      assert(res != noreg, "need result register");
      __ setcc(Assembler::equal, res);
    }
    return;
  }

  __ jcc(Assembler::notEqual, *mid_stub->entry());

  // The slow-stub re-enters with condition flags set according to the CAS;
  // we may need to set the result accordingly.
  __ bind(*slow_stub->continuation());

  // Step 5. If we need a boolean result out of CAS, set the flag appropriately
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (!exchange) {
    assert(res != noreg, "need result register");
    __ setcc(Assembler::equal, res);
  }

  // The mid-stub re-enters with the result already set correctly.
  __ bind(*mid_stub->continuation());
}

#undef __
#define __ masm.

void ShenandoahLoadRefBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());

  Register obj = _obj;
  if (_narrow) {
    __ movl(_tmp1, _obj);
    __ decode_heap_oop(_tmp1);
    obj = _tmp1;
  }

  // Weak/phantom loads always need to go to runtime.
  if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
    __ movptr(_tmp2, obj);
    __ shrptr(_tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(_tmp3, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(_tmp2, Address(_tmp2, _tmp3, Address::times_1));
    __ testbool(_tmp2);
    __ jcc(Assembler::zero, *continuation());
  }

  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != obj) {
      if (c_rarg0 == _addr) {
        __ movptr(_tmp2, _addr);
        _addr = _tmp2;
      }
      __ movptr(c_rarg0, obj);
    }
    __ movptr(c_rarg1, _addr);

    address entry;
    if (_narrow) {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
      }
    } else {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        entry = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
      }
    }
    __ call(RuntimeAddress(entry), rax);
    assert(!save_registers.contains(_obj), "must not save result register");
    __ movptr(_obj, rax);
  }
  if (_narrow) {
    __ encode_heap_oop(_obj);
  }

  __ jmp(*continuation());
}

void ShenandoahSATBBarrierStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());
  Address index(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(r15_thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label runtime;

  // Do we need to load the previous value?
  if (_addr != noreg) {
    __ load_heap_oop(_preval, Address(_addr, 0), noreg, AS_RAW);
  }
  // Is the previous value null?
  __ cmpptr(_preval, NULL_WORD);
  __ jcc(Assembler::equal, *continuation());

  // Can we store a value in the given thread's buffer?
  // (The index field is typed as size_t.)
  __ movptr(_tmp, index);
  __ testptr(_tmp, _tmp);
  __ jccb(Assembler::zero, runtime);
  // The buffer is not full, store the value into it.
  __ subptr(_tmp, wordSize);
  __ movptr(index, _tmp);
  __ addptr(_tmp, buffer);
  __ movptr(Address(_tmp, 0), _preval);

  __ jmp(*continuation());

  __ bind(runtime);
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != _preval) {
      __ mov(c_rarg0, _preval);
    }
    // rax is a caller-saved, non-argument-passing register, so it does not
    // interfere with c_rarg0 or c_rarg1. If it contained any live value before
    // entering this stub, it is saved at this point, and restored after the
    // call. If it did not contain any live value, it is free to be used. In
    // either case, it is safe to use it here as a call scratch register.
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2)), rax);
  }
  __ jmp(*continuation());
}

void ShenandoahCASBarrierMidStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  if (!_cae) {
    // Set the result to false, in case we fail the following tests.
    // Failing those tests means a legitimate failure.
    // Otherwise, the result will be set correctly after returning from
    // the slow-path.
    __ movl(_result, 0); // Result = false.
  }
  // Check if the CAS result is null. If it is, then we must have a legitimate failure.
  // This makes loading the fwdptr in the slow-path simpler.
  if (UseCompressedOops) {
    __ testl(_expected, _expected);
  } else {
    __ testptr(_expected, _expected);
  }
  __ jcc(Assembler::equal, *continuation());

  // Check if GC is in progress; otherwise we must have a legitimate failure.
  Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::notZero, *_slow_stub->entry());
  __ jmp(*continuation());
}

void ShenandoahCASBarrierSlowStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  assert(_expected == rax, "expected must be rax");

  // Step 2. CAS has failed because the value held at addr does not
  // match expected. This may be a false negative because the value fetched
  // from addr (now held in result) may be a from-space pointer to the
  // original copy of the same object referenced by the to-space pointer expected.
  //
  // To resolve this, it suffices to find the forward pointer associated
  // with the fetched value. If this matches expected, retry CAS with new
  // parameters. If this mismatches, then we have a legitimate
  // failure, and we're done.

  // Overwrite tmp1 with the from-space pointer fetched from memory
  __ movptr(_tmp1, _expected);

  if (UseCompressedOops) {
    __ decode_heap_oop_not_null(_tmp1);
  }

  // Load/decode forwarding pointer.
  __ movq(_tmp1, Address(_tmp1, oopDesc::mark_offset_in_bytes()));
  // Invert the mark-word (bitwise NOT). This lets us test the lowest two bits
  // easily while preserving the upper bits.
  __ notq(_tmp1);
  __ testq(_tmp1, markWord::lock_mask_in_place);
  // Not forwarded, must have a legit CAS failure.
  __ jcc(Assembler::notEqual, *continuation());
  // Set the lowest two bits. This is equivalent to clearing the two bits after
  // the subsequent inversion.
  __ orq(_tmp1, markWord::marked_value);
  // And invert back to get the forwardee.
  __ notq(_tmp1);
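  // Worked example (low bits only): a forwarded mark word is fwd | 0b11.
  // NOT gives low bits 0b00, so the testq above falls through; or-ing in 0b11
  // and inverting again yields (fwd | 0b11) & ~0b11, which is fwd itself.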

  if (UseCompressedOops) {
    __ encode_heap_oop_not_null(_tmp1); // encode for comparison
  }

  // Now we have the forwarded offender in tmp1.
  // We preserved the original expected value in tmp2 in the fast-path.
  // Compare and if they don't match, we have a legitimate failure
  __ cmpptr(_tmp1, _tmp2);
  __ jcc(Assembler::notEqual, *continuation());

  // Fall through to step 3.

  // Step 3. We've confirmed that the value originally held in memory
  // (now held in result) pointed to the from-space version of the original
  // expected value. Try the CAS again with the from-space expected
  // value. If it now succeeds, we're good.
  //
  // Note: expected holds the encoded from-space pointer that matches the to-space
  // object residing at tmp1.
  __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(_new_val, _addr);
  } else {
    __ cmpxchgptr(_new_val, _addr);
  }

  // If the fetched value did not equal expected, this could
  // still be a false negative because some other (GC) thread may have
  // newly overwritten the memory value with its to-space equivalent.
  __ jcc(Assembler::equal, *continuation());

  // Step 4. Try to CAS again, but with the original to-space expected.
  // This should be very rare.
  __ movptr(_expected, _tmp2);
  __ lock();
  if (UseCompressedOops) {
    __ cmpxchgl(_new_val, _addr);
  } else {
    __ cmpxchgptr(_new_val, _addr);
  }
  // At this point, there can no longer be false negatives.
  __ jmp(*continuation());
}

#undef __
#define __ masm->
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count,
                                                                     Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  Label L_loop, L_done;
  const Register end = count;
  assert_different_registers(addr, end);

  // Zero count? Nothing to do.
  __ testl(count, count);
  __ jccb(Assembler::zero, L_done);

  const Register thread = r15_thread;
  Address curr_ct_holder_addr(thread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
  __ movptr(tmp, curr_ct_holder_addr);

  __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr + count * oop_size
  __ subptr(end, BytesPerHeapOop);                  // end - 1 to make inclusive
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> cards count

  __ addptr(addr, tmp);
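  // Illustrative sketch of the loop below (pseudocode; indices are in cards,
  // and dirty_card_val() is 0):
  //
  //   first = addr >> card_shift;
  //   last  = (addr + count * oop_size - 1) >> card_shift;
  //   for (i = last - first; i >= 0; i--) {
  //     card_base[first + i] = dirty_card_val();
  //   }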

  __ BIND(L_loop);
  __ movb(Address(addr, count, Address::times_1), 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);

  __ BIND(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object being in the collection set.
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = r15_thread;
  const Register tmp = rdx;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store original value in the thread's buffer?

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), rcx);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);

  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1