31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
32 #include "gc/shenandoah/shenandoahRuntime.hpp"
33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
34 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "interpreter/interp_masm.hpp"
37 #include "runtime/javaThread.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #ifdef COMPILER1
40 #include "c1/c1_LIRAssembler.hpp"
41 #include "c1/c1_MacroAssembler.hpp"
42 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
43 #endif
44
45 #define __ masm->
46
47 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
48 Register src, Register dst, Register count, RegSet saved_regs) {
49 if (is_oop) {
50 bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
51 if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahIUBarrier || ShenandoahLoadRefBarrier) {
52
53 Label done;
54
55 // Avoid calling runtime if count == 0
56 __ beqz(count, done);
57
58 // Is GC active?
59 Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
60 assert_different_registers(src, dst, count, t0);
61
62 __ lbu(t0, gc_state);
63 if (ShenandoahSATBBarrier && dest_uninitialized) {
64 __ test_bit(t0, t0, ShenandoahHeap::HAS_FORWARDED_BITPOS);
65 __ beqz(t0, done);
66 } else {
67 __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
68 __ beqz(t0, done);
69 }
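      // Roughly, the filtering above amounts to (illustrative sketch, not generated code;
      // gc_state(thread) stands for the thread-local byte loaded above):
      //   uint8_t gs = gc_state(thread);
      //   bool needed = (ShenandoahSATBBarrier && dest_uninitialized)
      //                   ? (gs & ShenandoahHeap::HAS_FORWARDED)
      //                   : (gs & (ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING));
      //   if (!needed) return;   // no runtime call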
70
71 __ push_reg(saved_regs, sp);
72 if (UseCompressedOops) {
73 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
74 src, dst, count);
75 } else {
76 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
77 }
78 __ pop_reg(saved_regs, sp);
79 __ bind(done);
80 }
81 }
82 }
83
84 void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
85 Register obj,
86 Register pre_val,
87 Register thread,
88 Register tmp,
89 bool tosca_live,
90 bool expand_call) {
91 if (ShenandoahSATBBarrier) {
92 satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
93 }
94 }
95
96 void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
97 Register obj,
98 Register pre_val,
99 Register thread,
100 Register tmp1,
101 Register tmp2,
102 bool tosca_live,
103 bool expand_call) {
104 // If expand_call is true then we expand the call_VM_leaf macro
105 // directly to skip generating the check by
106 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
107 assert(thread == xthread, "must be");
108
109 Label done;
110 Label runtime;
111
112 assert_different_registers(obj, pre_val, tmp1, tmp2);
113 assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
114
115 Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
116 Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
117 Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
118
119 // Is marking active?
120 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
121 __ lwu(tmp1, in_progress);
122 } else {
123 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
124 __ lbu(tmp1, in_progress);
125 }
126 __ beqz(tmp1, done);
127
128 // Do we need to load the previous value?
129 if (obj != noreg) {
130 __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
131 }
132
133 // Is the previous value null?
134 __ beqz(pre_val, done);
135
136 // Can we store original value in the thread's buffer?
137 // Is index == 0?
138 // (The index field is typed as size_t.)
139 __ ld(tmp1, index); // tmp := *index_adr
140 __ beqz(tmp1, runtime); // tmp == 0? If yes, goto runtime
141
142 __ sub(tmp1, tmp1, wordSize); // tmp := tmp - wordSize
143 __ sd(tmp1, index); // *index_adr := tmp
144 __ ld(tmp2, buffer);
145 __ add(tmp1, tmp1, tmp2); // tmp := tmp + *buffer_adr
146
147 // Record the previous value
148 __ sd(pre_val, Address(tmp1, 0));
149 __ j(done);
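  // The fast path above is, roughly (illustrative sketch, not the generated code; index and
  // buffer are the thread-local SATB queue fields loaded above):
  //   if (index == 0) goto runtime;        // index counts down; 0 means the buffer is full
  //   index -= wordSize;                   // the new index is written back to the thread
  //   *(oop*)(buffer + index) = pre_val;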
150
151 __ bind(runtime);
152 // save the live input values
153 RegSet saved = RegSet::of(pre_val);
154 if (tosca_live) saved += RegSet::of(x10);
155 if (obj != noreg) saved += RegSet::of(obj);
156
157 __ push_reg(saved, sp);
158
159 // Calling the runtime using the regular call_VM_leaf mechanism generates
160 // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
161 // that checks that *(rfp + frame::interpreter_frame_last_sp) is null.
162 //
295 } else {
296 target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
297 }
298 } else {
299 assert(is_phantom, "only remaining strength");
300 assert(!is_narrow, "phantom access cannot be narrow");
301 target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
302 }
303 __ call(target);
304 __ mv(t0, x10);
305 __ pop_call_clobbered_registers();
306 __ mv(x10, t0);
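  // x10 carries the runtime result but is about to be restored by pop_call_clobbered_registers;
  // staging the value in t0 (not restored by that pop) keeps the barrier result alive across it.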
307 __ bind(not_cset);
308 __ mv(result_dst, x10);
309 __ pop_reg(saved_regs, sp);
310
311 __ bind(heap_stable);
312 __ leave();
313 }
314
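// The IU (incremental update) barrier records the *new* value of a reference store. It reuses
// the SATB pre-barrier machinery with obj == noreg, so no previous-value load is emitted and
// dst itself is enqueued.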
315 void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst, Register tmp) {
316 if (ShenandoahIUBarrier) {
317 __ push_call_clobbered_registers();
318
319 satb_write_barrier_pre(masm, noreg, dst, xthread, tmp, t0, true, false);
320
321 __ pop_call_clobbered_registers();
322 }
323 }
324
325 //
326 // Arguments:
327 //
328 // Inputs:
329 // src: oop location to load from, might be clobbered
330 //
331 // Output:
332 // dst: oop loaded from src location
333 //
334 // Kill:
335 // x30 (tmp reg)
336 //
337 // Alias:
338 // dst: x30 (might use x30 as temporary output register to avoid clobbering src)
339 //
340 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
341 DecoratorSet decorators,
342 BasicType type,
343 Register dst,
344 Address src,
379 BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
380 }
381
382 // 3: apply keep-alive barrier if needed
383 if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
384 __ enter();
385 __ push_call_clobbered_registers();
386 satb_write_barrier_pre(masm /* masm */,
387 noreg /* obj */,
388 dst /* pre_val */,
389 xthread /* thread */,
390 tmp1 /* tmp1 */,
391 tmp2 /* tmp2 */,
392 true /* tosca_live */,
393 true /* expand_call */);
394 __ pop_call_clobbered_registers();
395 __ leave();
396 }
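  // The enqueue above pushes the just-loaded referent through the SATB pre-barrier
  // (obj == noreg, pre_val == dst), so a referent observed by the mutator during concurrent
  // marking is treated as live by the marker rather than being reclaimed underneath it.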
397 }
398
399 void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
400 Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
401 bool on_oop = is_reference_type(type);
402 if (!on_oop) {
403 BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
404 return;
405 }
406
407 // flatten object address if needed
408 if (dst.offset() == 0) {
409 if (dst.base() != tmp3) {
410 __ mv(tmp3, dst.base());
411 }
412 } else {
413 __ la(tmp3, dst);
414 }
415
416 shenandoah_write_barrier_pre(masm,
417 tmp3 /* obj */,
418 tmp2 /* pre_val */,
419 xthread /* thread */,
420 tmp1 /* tmp */,
421 val != noreg /* tosca_live */,
422 false /* expand_call */);
423
424 if (val == noreg) {
425 BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
426 } else {
427 iu_barrier(masm, val, tmp1);
428 // Leftover from the G1 barrier code (which needs an uncompressed oop for its region cross check); new_val is not used below.
429 Register new_val = val;
430 if (UseCompressedOops) {
431 new_val = t1;
432 __ mv(new_val, val);
433 }
434 BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
435 }
436 }
437
438 void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
439 Register obj, Register tmp, Label& slowpath) {
440 Label done;
441 // Resolve jobject
442 BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
443
444 // Check for null.
445 __ beqz(obj, done);
446
447 assert(obj != t1, "need t1");
448 Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
449 __ lbu(t1, gc_state);
450
451 // Check for heap in evacuation phase
452 __ test_bit(t0, t1, ShenandoahHeap::EVACUATION_BITPOS);
453 __ bnez(t0, slowpath);
454
522 __ bne(t0, t1, retry);
523
524 __ bind(success);
525 if (is_cae) {
526 __ mv(result, expected);
527 } else {
528 __ mv(result, 1);
529 }
530 __ j(done);
531
532 __ bind(fail);
533 if (is_cae) {
534 __ mv(result, t0);
535 } else {
536 __ mv(result, zr);
537 }
538
539 __ bind(done);
540 }
541
542 #undef __
543
544 #ifdef COMPILER1
545
546 #define __ ce->masm()->
547
548 void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
549 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
550 // At this point we know that marking is in progress.
551 // If do_load() is true then we have to emit the
552 // load of the previous value; otherwise it has already
553 // been loaded into _pre_val.
554 __ bind(*stub->entry());
555
556 assert(stub->pre_val()->is_register(), "Precondition.");
557
558 Register pre_val_reg = stub->pre_val()->as_register();
559
560 if (stub->do_load()) {
561 ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */);
31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
32 #include "gc/shenandoah/shenandoahRuntime.hpp"
33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
34 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "interpreter/interp_masm.hpp"
37 #include "runtime/javaThread.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #ifdef COMPILER1
40 #include "c1/c1_LIRAssembler.hpp"
41 #include "c1/c1_MacroAssembler.hpp"
42 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
43 #endif
44
45 #define __ masm->
46
47 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
48 Register src, Register dst, Register count, RegSet saved_regs) {
49 if (is_oop) {
50 bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
51 if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
52
53 Label done;
54
55 // Avoid calling runtime if count == 0
56 __ beqz(count, done);
57
58 // Is GC active?
59 Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
60 assert_different_registers(src, dst, count, t0);
61
62 __ lbu(t0, gc_state);
63 if (ShenandoahSATBBarrier && dest_uninitialized) {
64 __ test_bit(t0, t0, ShenandoahHeap::HAS_FORWARDED_BITPOS);
65 __ beqz(t0, done);
66 } else {
67 __ andi(t0, t0, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
68 __ beqz(t0, done);
69 }
70
71 __ push_reg(saved_regs, sp);
72 if (UseCompressedOops) {
73 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
74 src, dst, count);
75 } else {
76 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
77 }
78 __ pop_reg(saved_regs, sp);
79 __ bind(done);
80 }
81 }
82 }
83
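// With ShenandoahCardBarrier enabled (used by generational Shenandoah), the arraycopy epilogue
// dirties the card-table entries covering the copied destination range via
// gen_write_ref_array_post_barrier below.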
84 void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
85 Register start, Register count, Register tmp, RegSet saved_regs) {
86 if (ShenandoahCardBarrier && is_oop) {
87 gen_write_ref_array_post_barrier(masm, decorators, start, count, tmp, saved_regs);
88 }
89 }
90
91 void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
92 Register obj,
93 Register pre_val,
94 Register thread,
95 Register tmp,
96 bool tosca_live,
97 bool expand_call) {
98 if (ShenandoahSATBBarrier) {
99 satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, t0, tosca_live, expand_call);
100 }
101 }
102
103 void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
104 Register obj,
105 Register pre_val,
106 Register thread,
107 Register tmp1,
108 Register tmp2,
109 bool tosca_live,
110 bool expand_call) {
111 // If expand_call is true then we expand the call_VM_leaf macro
112 // directly to skip generating the check by
113 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
114 assert(thread == xthread, "must be");
115
116 Label done;
117 Label runtime;
118
119 assert_different_registers(obj, pre_val, tmp1, tmp2);
120 assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
121
122 Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
123 Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
124
125 // Is marking active?
126 Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
127 __ lbu(t1, gc_state);
128 __ test_bit(t1, t1, ShenandoahHeap::MARKING_BITPOS);
129 __ beqz(t1, done);
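  // Marking is now detected from the per-thread gc_state byte (MARKING bit) rather than the
  // SATB queue's own active flag used in the version above; both act as the "is SATB marking
  // active for this thread" filter.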
130
131 // Do we need to load the previous value?
132 if (obj != noreg) {
133 __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
134 }
135
136 // Is the previous value null?
137 __ beqz(pre_val, done);
138
139 // Can we store original value in the thread's buffer?
140 // Is index == 0?
141 // (The index field is typed as size_t.)
142 __ ld(tmp1, index); // tmp := *index_adr
143 __ beqz(tmp1, runtime); // tmp == 0? If yes, goto runtime
144
145 __ sub(tmp1, tmp1, wordSize); // tmp := tmp - wordSize
146 __ sd(tmp1, index); // *index_adr := tmp
147 __ ld(tmp2, buffer);
148 __ add(tmp1, tmp1, tmp2); // tmp := tmp + *buffer_adr
149
150 // Record the previous value
151 __ sd(pre_val, Address(tmp1, 0));
152 __ j(done);
153
154 __ bind(runtime);
155 // save the live input values
156 RegSet saved = RegSet::of(pre_val);
157 if (tosca_live) saved += RegSet::of(x10);
158 if (obj != noreg) saved += RegSet::of(obj);
159
160 __ push_reg(saved, sp);
161
162 // Calling the runtime using the regular call_VM_leaf mechanism generates
163 // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
164 // that checks that *(rfp + frame::interpreter_frame_last_sp) is null.
165 //
298 } else {
299 target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
300 }
301 } else {
302 assert(is_phantom, "only remaining strength");
303 assert(!is_narrow, "phantom access cannot be narrow");
304 target = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
305 }
306 __ call(target);
307 __ mv(t0, x10);
308 __ pop_call_clobbered_registers();
309 __ mv(x10, t0);
310 __ bind(not_cset);
311 __ mv(result_dst, x10);
312 __ pop_reg(saved_regs, sp);
313
314 __ bind(heap_stable);
315 __ leave();
316 }
317
318 //
319 // Arguments:
320 //
321 // Inputs:
322 // src: oop location to load from, might be clobbered
323 //
324 // Output:
325 // dst: oop loaded from src location
326 //
327 // Kill:
328 // x30 (tmp reg)
329 //
330 // Alias:
331 // dst: x30 (might use x30 as temporary output register to avoid clobbering src)
332 //
333 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm,
334 DecoratorSet decorators,
335 BasicType type,
336 Register dst,
337 Address src,
372 BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
373 }
374
375 // 3: apply keep-alive barrier if needed
376 if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
377 __ enter();
378 __ push_call_clobbered_registers();
379 satb_write_barrier_pre(masm /* masm */,
380 noreg /* obj */,
381 dst /* pre_val */,
382 xthread /* thread */,
383 tmp1 /* tmp1 */,
384 tmp2 /* tmp2 */,
385 true /* tosca_live */,
386 true /* expand_call */);
387 __ pop_call_clobbered_registers();
388 __ leave();
389 }
390 }
391
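// Card mark for the post-write barrier. Roughly (illustrative sketch, not generated code;
// ct_base stands for the thread-local value at ShenandoahThreadLocalData::card_table_offset()):
//   CardTable::CardValue* card = ct_base + ((uintptr_t)obj >> CardTable::card_shift());
//   if (!UseCondCardMark || *card != CardTable::dirty_card_val())   // dirty_card_val() == 0
//     *card = CardTable::dirty_card_val();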
392 void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
393 assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
394
395 __ srli(obj, obj, CardTable::card_shift());
396
397 assert(CardTable::dirty_card_val() == 0, "must be");
398
399 Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
400 __ ld(t1, curr_ct_holder_addr);
401 __ add(t1, obj, t1);
402
403 if (UseCondCardMark) {
404 Label L_already_dirty;
405 __ lbu(t0, Address(t1));
406 __ beqz(t0, L_already_dirty);
407 __ sb(zr, Address(t1));
408 __ bind(L_already_dirty);
409 } else {
410 __ sb(zr, Address(t1));
411 }
412 }
413
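// For reference stores the order is: SATB pre-barrier on the previous value, the raw store,
// then a card mark on the destination when ShenandoahCardBarrier is on, the access is IN_HEAP,
// and a real value is stored (val != noreg); storing null needs no card mark.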
414 void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
415 Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
416 bool on_oop = is_reference_type(type);
417 if (!on_oop) {
418 BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
419 return;
420 }
421
422 // flatten object address if needed
423 if (dst.offset() == 0) {
424 if (dst.base() != tmp3) {
425 __ mv(tmp3, dst.base());
426 }
427 } else {
428 __ la(tmp3, dst);
429 }
430
431 shenandoah_write_barrier_pre(masm,
432 tmp3 /* obj */,
433 tmp2 /* pre_val */,
434 xthread /* thread */,
435 tmp1 /* tmp */,
436 val != noreg /* tosca_live */,
437 false /* expand_call */);
438
439 BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
440
441 bool in_heap = (decorators & IN_HEAP) != 0;
442 bool needs_post_barrier = (val != noreg) && in_heap && ShenandoahCardBarrier;
443 if (needs_post_barrier) {
444 store_check(masm, tmp3);
445 }
446 }
447
448 void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
449 Register obj, Register tmp, Label& slowpath) {
450 Label done;
451 // Resolve jobject
452 BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
453
454 // Check for null.
455 __ beqz(obj, done);
456
457 assert(obj != t1, "need t1");
458 Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
459 __ lbu(t1, gc_state);
460
461 // Check for heap in evacuation phase
462 __ test_bit(t0, t1, ShenandoahHeap::EVACUATION_BITPOS);
463 __ bnez(t0, slowpath);
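  // During evacuation the resolved oop may still point at a from-space copy, so fall back to
  // the slow path, which is expected to resolve the reference through the full barrier before
  // handing it to native code.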
464
532 __ bne(t0, t1, retry);
533
534 __ bind(success);
535 if (is_cae) {
536 __ mv(result, expected);
537 } else {
538 __ mv(result, 1);
539 }
540 __ j(done);
541
542 __ bind(fail);
543 if (is_cae) {
544 __ mv(result, t0);
545 } else {
546 __ mv(result, zr);
547 }
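  // Result protocol: in CAE (compare-and-exchange) mode the witnessed value is returned
  // ('expected' on success, the conflicting value left in t0 on failure); in boolean mode the
  // result is 1 on success and 0 on failure.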
548
549 __ bind(done);
550 }
551
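// Dirties every card covering [start, start + count * BytesPerHeapOop). Roughly (illustrative
// sketch, not generated code; ct_base as in store_check above):
//   size_t first = (uintptr_t)start >> CardTable::card_shift();
//   size_t last  = ((uintptr_t)start + count * BytesPerHeapOop - 1) >> CardTable::card_shift();
//   for (size_t i = first; i <= last; i++)          // the generated loop walks this range downwards
//     ct_base[i] = CardTable::dirty_card_val();     // == 0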
552 void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
553 Register start, Register count, Register tmp, RegSet saved_regs) {
554 assert(ShenandoahCardBarrier, "Did you mean to enable ShenandoahCardBarrier?");
555
556 Label L_loop, L_done;
557 const Register end = count;
558
559 // Zero count? Nothing to do.
560 __ beqz(count, L_done);
561
562 // end = start + (count << LogBytesPerHeapOop)
563 // then step back one element so that end is the address of the last element (inclusive)
564 __ shadd(end, count, start, tmp, LogBytesPerHeapOop);
565 __ sub(end, end, BytesPerHeapOop);
566 __ srli(start, start, CardTable::card_shift());
567 __ srli(end, end, CardTable::card_shift());
568
569 // count := number of card bytes between the first and last card; the loop below dirties cards start .. start+count (inclusive)
570 __ sub(count, end, start);
571
572 Address curr_ct_holder_addr(xthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
573 __ ld(tmp, curr_ct_holder_addr);
574 __ add(start, start, tmp);
575
576 __ bind(L_loop);
577 __ add(tmp, start, count);
578 __ sb(zr, Address(tmp));
579 __ sub(count, count, 1);
580 __ bgez(count, L_loop);
581 __ bind(L_done);
582 }
583
584 #undef __
585
586 #ifdef COMPILER1
587
588 #define __ ce->masm()->
589
590 void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
591 ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
592 // At this point we know that marking is in progress.
593 // If do_load() is true then we have to emit the
594 // load of the previous value; otherwise it has already
595 // been loaded into _pre_val.
596 __ bind(*stub->entry());
597
598 assert(stub->pre_val()->is_register(), "Precondition.");
599
600 Register pre_val_reg = stub->pre_val()->as_register();
601
602 if (stub->do_load()) {
603 ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /* wide */);