24 */
25
26 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
27 #include "gc/shenandoah/mode/shenandoahMode.hpp"
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
30 #include "gc/shenandoah/shenandoahForwarding.hpp"
31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
32 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
33 #include "gc/shenandoah/shenandoahRuntime.hpp"
34 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
35 #include "interpreter/interp_masm.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "runtime/javaThread.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #ifdef COMPILER1
40 #include "c1/c1_LIRAssembler.hpp"
41 #include "c1/c1_MacroAssembler.hpp"
42 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
43 #endif
44
45 #define __ masm->
46
47 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
48 Register src, Register dst, Register count, RegSet saved_regs) {
49 if (is_oop) {
50 bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
51 if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
52
53 Label done;
54
55 // Avoid calling runtime if count == 0
56 __ cbz(count, done);
57
58 // Is GC active?
59 Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
60 __ ldrb(rscratch1, gc_state);
61 if (ShenandoahSATBBarrier && dest_uninitialized) {
62 __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
63 } else {
64 __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
65 __ tst(rscratch1, rscratch2);
66 __ br(Assembler::EQ, done);
589 if (is_cae) {
590 // We're falling through to done to indicate success. Success
591 // with is_cae is denoted by returning the value of expected as
592 // result.
593 __ mov(tmp2, expected);
594 }
595
596 __ bind(done);
597 // At entry to done, the Z (EQ) flag is on iff if the CAS
598 // operation was successful. Additionally, if is_cae, tmp2 holds
599 // the value most recently fetched from addr. In this case, success
600 // is denoted by tmp2 matching expected.
601
602 if (is_cae) {
603 __ mov(result, tmp2);
604 } else {
605 __ cset(result, Assembler::EQ);
606 }
607 }
608
609 void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
610 Register start, Register count, Register scratch) {
611 assert(ShenandoahCardBarrier, "Should have been checked by caller");
612
613 Label L_loop, L_done;
614 const Register end = count;
615
616 // Zero count? Nothing to do.
617 __ cbz(count, L_done);
618
619 // end = start + count << LogBytesPerHeapOop
620 // last element address to make inclusive
621 __ lea(end, Address(start, count, Address::lsl(LogBytesPerHeapOop)));
622 __ sub(end, end, BytesPerHeapOop);
623 __ lsr(start, start, CardTable::card_shift());
624 __ lsr(end, end, CardTable::card_shift());
625
626 // number of bytes to copy
627 __ sub(count, end, start);
628
|
24 */
25
26 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
27 #include "gc/shenandoah/mode/shenandoahMode.hpp"
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
30 #include "gc/shenandoah/shenandoahForwarding.hpp"
31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
32 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
33 #include "gc/shenandoah/shenandoahRuntime.hpp"
34 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
35 #include "interpreter/interp_masm.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "runtime/javaThread.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #ifdef COMPILER1
40 #include "c1/c1_LIRAssembler.hpp"
41 #include "c1/c1_MacroAssembler.hpp"
42 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
43 #endif
44 #ifdef COMPILER2
45 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
46 #endif
47
48 #define __ masm->
49
50 #ifdef PRODUCT
51 #define BLOCK_COMMENT(str) /* nothing */
52 #else
53 #define BLOCK_COMMENT(str) __ block_comment(str)
54 #endif
55
56 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
57 Register src, Register dst, Register count, RegSet saved_regs) {
58 if (is_oop) {
59 bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
60 if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
61
62 Label done;
63
64 // Avoid calling runtime if count == 0
65 __ cbz(count, done);
66
67 // Is GC active?
68 Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
69 __ ldrb(rscratch1, gc_state);
70 if (ShenandoahSATBBarrier && dest_uninitialized) {
71 __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
72 } else {
73 __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
74 __ tst(rscratch1, rscratch2);
75 __ br(Assembler::EQ, done);
598 if (is_cae) {
599 // We're falling through to done to indicate success. Success
600 // with is_cae is denoted by returning the value of expected as
601 // result.
602 __ mov(tmp2, expected);
603 }
604
605 __ bind(done);
606 // At entry to done, the Z (EQ) flag is on iff if the CAS
607 // operation was successful. Additionally, if is_cae, tmp2 holds
608 // the value most recently fetched from addr. In this case, success
609 // is denoted by tmp2 matching expected.
610
611 if (is_cae) {
612 __ mov(result, tmp2);
613 } else {
614 __ cset(result, Assembler::EQ);
615 }
616 }
617
618 #ifdef COMPILER2
619 void ShenandoahBarrierSetAssembler::load_ref_barrier_c2(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp, bool narrow, bool maybe_null) {
620 assert_different_registers(obj, addr, tmp);
621 BLOCK_COMMENT("load_ref_barrier_c2 {");
622 if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
623 return;
624 }
625 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
626 ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp, noreg, noreg, narrow);
627
628 // Don't preserve the obj across the runtime call, we override it from the return value anyway.
629 stub->dont_preserve(obj);
630 if (tmp != noreg) {
631 stub->dont_preserve(tmp); // temp, no need to save
632 }
633
634 Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
635 __ ldrb(rscratch1, gc_state);
636
637 // Check if GC marking is in progress or we are handling a weak reference, otherwise we don't have to do anything.
638 bool is_strong = (node->barrier_data() & ShenandoahBarrierStrong) != 0;
639 if (is_strong) {
640 __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, *stub->continuation());
641 __ b(*stub->entry());
642 } else {
643 static_assert(ShenandoahHeap::HAS_FORWARDED_BITPOS == 0, "Relied on in LRB check below.");
644 __ orr(tmp, rscratch1, rscratch1, Assembler::LSR, ShenandoahHeap::WEAK_ROOTS_BITPOS);
645 __ tbz(tmp, ShenandoahHeap::HAS_FORWARDED_BITPOS, *stub->continuation());
646 __ b(*stub->entry());
647 }
648
649 __ bind(*stub->continuation());
650 BLOCK_COMMENT("} load_ref_barrier_c2");
651 }
652
653 void ShenandoahBarrierSetAssembler::load_ref_barrier_c3(const MachNode* node, MacroAssembler* masm, Register obj, Register addr, Register tmp, bool narrow, bool maybe_null, Register gc_state) {
654 BLOCK_COMMENT("load_ref_barrier_c3 {");
655 if (!ShenandoahLoadRefBarrierStubC2::needs_barrier(node)) {
656 return;
657 }
658 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
659 ShenandoahLoadRefBarrierStubC2* const stub = ShenandoahLoadRefBarrierStubC2::create(node, obj, addr, tmp, noreg, noreg, narrow);
660
661 // Don't preserve the obj across the runtime call, we override it from the return value anyway.
662 stub->dont_preserve(obj);
663
664 // Check if GC marking is in progress or we are handling a weak reference,
665 // otherwise we don't have to do anything. The code below was optimized to
666 // use less registers and instructions as possible at the expense of always
667 // having a branch instruction. The reason why we use this particular branch
668 // scheme is because the stub entry may be too far for the tbnz to jump to.
669 bool is_strong = (node->barrier_data() & ShenandoahBarrierStrong) != 0;
670 if (is_strong) {
671 __ tbz(gc_state, ShenandoahHeap::HAS_FORWARDED_BITPOS, *stub->continuation());
672 __ b(*stub->entry());
673 } else {
674 static_assert(ShenandoahHeap::HAS_FORWARDED_BITPOS == 0, "Relied on in LRB check below.");
675 __ orr(tmp, gc_state, gc_state, Assembler::LSR, ShenandoahHeap::WEAK_ROOTS_BITPOS);
676 __ tbz(tmp, ShenandoahHeap::HAS_FORWARDED_BITPOS, *stub->continuation());
677 __ b(*stub->entry());
678 }
679
680 __ bind(*stub->continuation());
681 BLOCK_COMMENT("} load_ref_barrier_c3");
682 }
683
684 void ShenandoahBarrierSetAssembler::satb_barrier_c3(const MachNode* node, MacroAssembler* masm, Register addr, Register pre_val, Register gc_state) {
685 assert_different_registers(addr, pre_val);
686 if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
687 return;
688 }
689
690 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
691 ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, pre_val);
692
693 // Check if GC marking is in progress, otherwise we don't have to do anything.
694 __ tstw(gc_state, ShenandoahHeap::MARKING);
695 __ br(Assembler::NE, *stub->entry());
696 __ bind(*stub->continuation());
697 }
698
699 void ShenandoahBarrierSetAssembler::satb_barrier_c2(const MachNode* node, MacroAssembler* masm, Register addr, Register pre_val) {
700 assert_different_registers(addr, pre_val);
701 if (!ShenandoahSATBBarrierStubC2::needs_barrier(node)) {
702 return;
703 }
704 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
705 ShenandoahSATBBarrierStubC2* const stub = ShenandoahSATBBarrierStubC2::create(node, addr, pre_val);
706
707 // Check if GC marking is in progress, otherwise we don't have to do anything.
708 Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
709 __ ldrb(rscratch1, gc_state);
710 __ tstw(rscratch1, ShenandoahHeap::MARKING);
711 __ br(Assembler::NE, *stub->entry());
712 __ bind(*stub->continuation());
713 }
714
715 void ShenandoahBarrierSetAssembler::card_barrier_c2(const MachNode* node, MacroAssembler* masm, Register addr, Register tmp) {
716 if (!ShenandoahCardBarrier ||
717 (node->barrier_data() & (ShenandoahBarrierCardMark | ShenandoahBarrierCardMarkNotNull)) == 0) {
718 return;
719 }
720
721 Assembler::InlineSkippedInstructionsCounter skip_counter(masm);
722 __ lsr(tmp, addr, CardTable::card_shift());
723
724 assert(CardTable::dirty_card_val() == 0, "must be");
725
726 Address curr_ct_holder_addr(rthread, in_bytes(ShenandoahThreadLocalData::card_table_offset()));
727 __ ldr(rscratch1, curr_ct_holder_addr);
728
729 if (UseCondCardMark) {
730 Label L_already_dirty;
731 __ ldrb(rscratch2, Address(tmp, rscratch1));
732 __ cbz(rscratch2, L_already_dirty);
733 __ strb(zr, Address(tmp, rscratch1));
734 __ bind(L_already_dirty);
735 } else {
736 __ strb(zr, Address(tmp, rscratch1));
737 }
738 }
739
740 void ShenandoahBarrierSetAssembler::cmpxchg_oop_c2(const MachNode* node,
741 MacroAssembler* masm,
742 Register addr, Register oldval,
743 Register newval, Register res,
744 Register tmp1, Register tmp2,
745 bool acquire, bool release, bool weak, bool exchange) {
746 BLOCK_COMMENT("cmpxchg_oop_c2 {");
747 assert(res != noreg, "need result register");
748 assert_different_registers(oldval, addr, res, tmp1, tmp2);
749 assert_different_registers(newval, addr, res, tmp1, tmp2);
750
751 // Fast-path: Try to CAS optimistically. If successful, then we are done.
752 // EQ flag set iff success. 'tmp2' holds value fetched.
753 Assembler::operand_size size = UseCompressedOops ? Assembler::word : Assembler::xword;
754 __ cmpxchg(addr, oldval, newval, size, acquire, release, weak, tmp2);
755
756 // If we need a boolean result out of CAS, set the flag appropriately. This
757 // would be the final result if we do not go slow.
758 if (!exchange) {
759 __ cset(res, Assembler::EQ);
760 } else {
761 __ mov(res, tmp2);
762 }
763
764 if (ShenandoahCASBarrier) {
765 ShenandoahCASBarrierSlowStubC2* const slow_stub =
766 ShenandoahCASBarrierSlowStubC2::create(node, addr, oldval, newval, res, tmp1, tmp2, exchange, acquire, release, weak);
767
768 slow_stub->dont_preserve(res); // set at the end, no need to save
769 slow_stub->dont_preserve(oldval); // saved explicitly
770 slow_stub->dont_preserve(tmp1); // temp, no need to save
771 slow_stub->dont_preserve(tmp2); // temp, no need to save
772
773 // On success, we do not need any additional handling.
774 __ br(Assembler::EQ, *slow_stub->continuation());
775
776 // If GC is in progress, it is likely we need additional handling for false negatives.
777 Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
778 __ ldrb(tmp1, gc_state);
779 __ tbz(tmp1, ShenandoahHeap::HAS_FORWARDED_BITPOS, *slow_stub->continuation());
780 __ b(*slow_stub->entry());
781
782 // Slow stub re-enters with result set correctly.
783 __ bind(*slow_stub->continuation());
784 }
785
786 BLOCK_COMMENT("} cmpxchg_oop_c2");
787 }
788
789 #undef __
790 #define __ masm.
791
// Out-of-line (slow path) code for the C2 load-reference barrier. Entered
// from load_ref_barrier_c2/_c3 when the fast path decided fixup may be
// needed. Resolves _obj through the Shenandoah LRB runtime and branches back
// to the continuation with the (possibly updated) oop in _obj. Clobbers
// rscratch1/rscratch2 and, for narrow oops, _tmp1.
void ShenandoahLoadRefBarrierStubC2::emit_code(MacroAssembler& masm) {
  BLOCK_COMMENT("ShenandoahLoadRefBarrierStubC2::emit_code {");
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());
  Register obj = _obj;
  if (_narrow) {
    // Work on the decoded oop in _tmp1; _obj keeps the compressed value.
    __ decode_heap_oop(_tmp1, _obj);
    obj = _tmp1;
  }
  // Weak/phantom loads always need to go to runtime.
  if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
    // Check for object in cset: index the in-cset table by region number
    // (object address >> region-size shift); a zero byte means not in cset,
    // so nothing to fix up.
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, obj, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ cbz(rscratch2, *continuation());
  }
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != obj) {
      if (c_rarg0 == _addr) {
        // Moving obj into c_rarg0 would clobber _addr; relocate _addr first.
        // NOTE: this mutates the member for the remainder of the emission.
        __ mov(rscratch1, _addr);
        _addr = rscratch1;
      }
      __ mov(c_rarg0, obj);
    }
    __ mov(c_rarg1, _addr);

    // Select the runtime entry matching the reference strength and the
    // narrow/wide flavor of the location.
    if (_narrow) {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow));
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow));
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow));
      }
    } else {
      if ((_node->barrier_data() & ShenandoahBarrierStrong) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong));
      } else if ((_node->barrier_data() & ShenandoahBarrierWeak) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak));
      } else if ((_node->barrier_data() & ShenandoahBarrierPhantom) != 0) {
        __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
      }
    }
    __ blr(rscratch1);
    // The runtime call leaves the resolved oop in r0; publish it in _obj.
    __ mov(_obj, r0);
  }
  if (_narrow) {
    // Re-compress in place so _obj matches what the inline code expects.
    __ encode_heap_oop(_obj);
  }
  __ b(*continuation());
  BLOCK_COMMENT("} ShenandoahLoadRefBarrierStubC2::emit_code");
}
846
// Out-of-line code for the C2 SATB (pre-write) barrier: records the previous
// value of a reference location in the thread-local SATB mark queue, calling
// into the runtime only when the queue buffer is exhausted. Clobbers
// rscratch1 and rscratch2.
void ShenandoahSATBBarrierStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());
  // Do we need to load the previous value? (_addr == noreg means _preval
  // already holds it — presumably supplied by the caller; no load needed.)
  if (_addr != noreg) {
    __ load_heap_oop(_preval, Address(_addr, 0), noreg, noreg, AS_RAW);
  }

  Address index(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(rthread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
  Label runtime;
  __ ldr(rscratch1, index);
  // If buffer is full (index == 0), call into runtime.
  __ cbz(rscratch1, runtime);

  // The buffer is not full, store value into it. The index counts down in
  // bytes, so decrement by a word first and store at buffer + index.
  __ sub(rscratch1, rscratch1, wordSize);
  __ str(rscratch1, index);
  __ ldr(rscratch2, buffer);
  __ str(_preval, Address(rscratch2, rscratch1));
  __ b(*continuation());

  // Runtime call: pass the previous value as the single argument.
  __ bind(runtime);
  {
    SaveLiveRegisters save_registers(&masm, this);
    if (c_rarg0 != _preval) {
      __ mov(c_rarg0, _preval);
    }
    __ mov(rscratch1, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre_c2));
    __ blr(rscratch1);
  }
  __ b(*continuation());
}
881
// Out-of-line "mid" stub for the C2 CAS barrier: separates legitimate CAS
// failures from potential false negatives (a from-space pointer at the
// location), dispatching to the slow stub only when GC may have forwarded
// objects. On entry _result holds the CAS result (see the null check below).
// Clobbers _tmp and the condition flags.
void ShenandoahCASBarrierMidStubC2::emit_code(MacroAssembler& masm) {
  Assembler::InlineSkippedInstructionsCounter skip_counter(&masm);
  __ bind(*entry());

  // Check if CAS result is null. If it is, then we must have a legitimate failure.
  // This makes loading the fwdptr in the slow-path simpler.
  __ tst(_result, _result);
  // In case of !CAE, this has the correct value for legitimate failure (0/false)
  // in result register.
  __ br(Assembler::EQ, *continuation());

  // Check if GC is in progress, otherwise we must have a legitimate failure.
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(_tmp, gc_state);
  __ tstw(_tmp, ShenandoahHeap::HAS_FORWARDED);
  __ br(Assembler::NE, *_slow_stub->entry());

  // Legitimate failure with a non-null witness: normalize the boolean result.
  if (!_cae) {
    __ mov(_result, 0); // result = false
  }
  __ b(*continuation());
}
904
// Out-of-line slow stub for the C2 CAS barrier: resolves a potential
// false-negative CAS failure by passing the failed location through the LRB
// runtime (only when the failure witness is in the collection set) and then
// retrying the CAS once. Leaves _result set the same way as the inline path
// (fetched value for CAE, boolean otherwise). Clobbers _tmp1 and _tmp2.
void ShenandoahCASBarrierSlowStubC2::emit_code(MacroAssembler& masm) {
  __ bind(*entry());

  // CAS has failed because the value held at addr does not match expected.
  // This may be a false negative because the version in memory might be
  // the from-space version of the same object we currently hold to-space
  // reference for.
  //
  // To resolve this, we need to pass the location through the LRB fixup,
  // this will make sure that the location has only to-space pointers.
  // To avoid calling into runtime often, we cset-check the object first.
  // We can inline most of the work here, but there is little point,
  // as CAS failures over cset locations must be rare. This fast-slow split
  // matches what we do for normal LRB.

  // Non-strong references should always go to runtime. We do not expect
  // CASes over non-strong locations.
  assert((_node->barrier_data() & ShenandoahBarrierStrong) != 0, "Only strong references for CASes");

  Label L_final;

  // (Compressed) failure witness is in _tmp2.
  // Unpack it and check if it is in collection set.
  // We need to backup the compressed version to use in the LRB.
  __ mov(_result, _tmp2);
  if (UseCompressedOops) {
    __ decode_heap_oop(_tmp2);
  }

  // In-cset check: index the in-cset table by the witness's region number;
  // zero byte means not in cset, so the failure is legitimate — retry only.
  __ mov(_tmp1, ShenandoahHeap::in_cset_fast_test_addr());
  __ lsr(_tmp2, _tmp2, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ ldrb(_tmp1, Address(_tmp1, _tmp2));
  __ cbz(_tmp1, L_final);

  {
    SaveLiveRegisters save_registers(&masm, this);
    // Load up failure witness again. (_result holds the backed-up, possibly
    // compressed witness; decode it afresh for the call.)
    __ mov(c_rarg0, _result);
    if (UseCompressedOops) {
      __ decode_heap_oop(c_rarg0);
    }
    __ mov(c_rarg1, _addr_reg);

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), 2);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), 2);
    }
    // We have called LRB to fix up the heap location. We do not care about its
    // result, as we will just try to CAS the location again.
  }

  __ bind(L_final);

  // Retry the CAS; _result receives the fetched value and the flags indicate
  // success (EQ) or failure.
  Assembler::operand_size size = UseCompressedOops ? Assembler::word : Assembler::xword;
  __ cmpxchg(_addr_reg, _expected, _new_val, size, _acquire, _release, _weak, _result);

  if (!_cae) {
    __ cset(_result, Assembler::EQ);
  }
  __ b(*continuation());
}
967 #undef __
968 #define __ masm->
969 #endif // COMPILER2
970
971 void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
972 Register start, Register count, Register scratch) {
973 assert(ShenandoahCardBarrier, "Should have been checked by caller");
974
975 Label L_loop, L_done;
976 const Register end = count;
977
978 // Zero count? Nothing to do.
979 __ cbz(count, L_done);
980
981 // end = start + count << LogBytesPerHeapOop
982 // last element address to make inclusive
983 __ lea(end, Address(start, count, Address::lsl(LogBytesPerHeapOop)));
984 __ sub(end, end, BytesPerHeapOop);
985 __ lsr(start, start, CardTable::card_shift());
986 __ lsr(end, end, CardTable::card_shift());
987
988 // number of bytes to copy
989 __ sub(count, end, start);
990
|