687 }
688
// Parse-time expansion of an atomic exchange (swap) at the access' address;
// returns the node producing the previous value.
// C2AccessFence is an RAII scope object: its constructor/destructor bracket
// the resolve + expansion below (the exact barrier/fence semantics live in
// C2AccessFence itself — defined elsewhere), so it must remain the first
// statement in this function.
Node* BarrierSetC2::atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  // Materialize/normalize the address inside 'access' before expanding.
  resolve_address(access);
  return atomic_xchg_at_resolved(access, new_val, value_type);
}
694
// Parse-time expansion of an atomic fetch-and-add at the access' address;
// returns the node produced by the resolved expansion.
// As with the other atomic_*_at entry points, the RAII C2AccessFence must be
// constructed before the address is resolved so its scope covers the whole
// operation.
Node* BarrierSetC2::atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  // Materialize/normalize the address inside 'access' before expanding.
  resolve_address(access);
  return atomic_add_at_resolved(access, new_val, value_type);
}
700
701 int BarrierSetC2::arraycopy_payload_base_offset(bool is_array) {
702 // Exclude the header but include array length to copy by 8 bytes words.
703 // Can't use base_offset_in_bytes(bt) since basic type is unknown.
704 int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
705 instanceOopDesc::base_offset_in_bytes();
706 // base_off:
707 // 8 - 32-bit VM or 64-bit VM, compact headers
708 // 12 - 64-bit VM, compressed klass
709 // 16 - 64-bit VM, normal klass
710 if (base_off % BytesPerLong != 0) {
711 assert(UseCompressedClassPointers, "");
712 assert(!UseCompactObjectHeaders, "");
713 if (is_array) {
714 // Exclude length to copy by 8 bytes words.
715 base_off += sizeof(int);
716 } else {
717 // Include klass to copy by 8 bytes words.
718 base_off = instanceOopDesc::klass_offset_in_bytes();
719 }
720 assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
721 }
722 return base_off;
723 }
724
725 void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const {
726 int base_off = arraycopy_payload_base_offset(is_array);
727 Node* payload_size = size;
728 Node* offset = kit->MakeConX(base_off);
729 payload_size = kit->gvn().transform(new SubXNode(payload_size, offset));
730 if (is_array) {
731 // Ensure the array payload size is rounded up to the next BytesPerLong
732 // multiple when converting to double-words. This is necessary because array
733 // size does not include object alignment padding, so it might not be a
734 // multiple of BytesPerLong for sub-long element types.
735 payload_size = kit->gvn().transform(new AddXNode(payload_size, kit->MakeConX(BytesPerLong - 1)));
736 }
737 payload_size = kit->gvn().transform(new URShiftXNode(payload_size, kit->intcon(LogBytesPerLong)));
738 ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, payload_size, true, false);
739 if (is_array) {
740 ac->set_clone_array();
741 } else {
742 ac->set_clone_inst();
743 }
744 Node* n = kit->gvn().transform(ac);
745 if (n == ac) {
746 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
// Accessor for the cached TypeFunc describing the signature of the runtime
// clone call (used by clone_in_runtime()). The cached value must have been
// created beforehand; this only hands it out.
inline const TypeFunc* BarrierSetC2::clone_type() {
  assert(BarrierSetC2::_clone_type_Type != nullptr, "should be initialized");
  return BarrierSetC2::_clone_type_Type;
}
842
843 #define XTOP LP64_ONLY(COMMA phase->top())
844
845 void BarrierSetC2::clone_in_runtime(PhaseMacroExpand* phase, ArrayCopyNode* ac,
846 address clone_addr, const char* clone_name) const {
847 Node* const ctrl = ac->in(TypeFunc::Control);
848 Node* const mem = ac->in(TypeFunc::Memory);
849 Node* const src = ac->in(ArrayCopyNode::Src);
850 Node* const dst = ac->in(ArrayCopyNode::Dest);
851 Node* const size = ac->in(ArrayCopyNode::Length);
852
853 assert(size->bottom_type()->base() == Type_X,
854 "Should be of object size type (int for 32 bits, long for 64 bits)");
855
856 // The native clone we are calling here expects the object size in words.
857 // Add header/offset size to payload size to get object size.
858 Node* const base_offset = phase->MakeConX(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
859 Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
860 // HeapAccess<>::clone expects size in heap words.
861 // For 64-bits platforms, this is a no-operation.
862 // For 32-bits platforms, we need to multiply full_size by HeapWordsPerLong (2).
863 Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
864
865 Node* const call = phase->make_leaf_call(ctrl,
866 mem,
867 clone_type(),
868 clone_addr,
869 clone_name,
870 TypeRawPtr::BOTTOM,
871 src, dst, full_size_in_heap_words XTOP);
872 phase->transform_later(call);
873 phase->igvn().replace_node(ac, call);
874 }
875
876 void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
877 Node* ctrl = ac->in(TypeFunc::Control);
878 Node* mem = ac->in(TypeFunc::Memory);
|
687 }
688
// Parse-time expansion of an atomic exchange (swap) at the access' address;
// returns the node producing the previous value.
// C2AccessFence is an RAII scope object: its constructor/destructor bracket
// the resolve + expansion below (the exact barrier/fence semantics live in
// C2AccessFence itself — defined elsewhere), so it must remain the first
// statement in this function.
Node* BarrierSetC2::atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  // Materialize/normalize the address inside 'access' before expanding.
  resolve_address(access);
  return atomic_xchg_at_resolved(access, new_val, value_type);
}
694
// Parse-time expansion of an atomic fetch-and-add at the access' address;
// returns the node produced by the resolved expansion.
// As with the other atomic_*_at entry points, the RAII C2AccessFence must be
// constructed before the address is resolved so its scope covers the whole
// operation.
Node* BarrierSetC2::atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  C2AccessFence fence(access);
  // Materialize/normalize the address inside 'access' before expanding.
  resolve_address(access);
  return atomic_add_at_resolved(access, new_val, value_type);
}
700
701 int BarrierSetC2::arraycopy_payload_base_offset(bool is_array) {
702 // Exclude the header but include array length to copy by 8 bytes words.
703 // Can't use base_offset_in_bytes(bt) since basic type is unknown.
704 int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
705 instanceOopDesc::base_offset_in_bytes();
706 // base_off:
707 // 4 - compact headers
708 // 8 - 32-bit VM
709 // 12 - 64-bit VM, compressed klass
710 // 16 - 64-bit VM, normal klass
711 if (base_off % BytesPerLong != 0) {
712 assert(UseCompressedClassPointers, "");
713 if (is_array) {
714 // Exclude length to copy by 8 bytes words.
715 base_off += sizeof(int);
716 } else {
717 if (!UseCompactObjectHeaders) {
718 // Include klass to copy by 8 bytes words.
719 base_off = instanceOopDesc::klass_offset_in_bytes();
720 }
721 }
722 assert(base_off % BytesPerLong == 0 || UseCompactObjectHeaders, "expect 8 bytes alignment");
723 }
724 return base_off;
725 }
726
727 void BarrierSetC2::clone(GraphKit* kit, Node* src_base, Node* dst_base, Node* size, bool is_array) const {
728 int base_off = arraycopy_payload_base_offset(is_array);
729 if (UseCompactObjectHeaders && !is_aligned(base_off, BytesPerLong) &&
730 !kit->gvn().type(src_base)->isa_aryptr()) {
731 guarantee(is_aligned(base_off, BytesPerInt), "must be 4-bytes aligned");
732 // The optimized copy routine only copies 8-byte words. For this reason, we must
733 // copy the 4 bytes at offset 4 separately.
734 // Use the correct field-specific alias derived from the typed address, matching
735 // the pattern in PhaseMacroExpand::generate_arraycopy (macroArrayCopy.cpp).
736 // Using AliasIdxRaw would create a mismatch between the typed address and the
737 // raw memory chain, causing an escape analysis assertion failure.
738 //
739 // Skip this when src_base has an array type. With StressReflectiveCode, the
740 // instance path of the clone can be live in the IR even when the type system
741 // knows src_base is an array. The pre-copy is unnecessary on such paths (they
742 // are unreachable at runtime), and creating a LoadNode at the array length
743 // offset would assert (LoadRangeNode required).
744 Node* sptr = kit->basic_plus_adr(src_base, base_off);
745 Node* dptr = kit->basic_plus_adr(dst_base, base_off);
746 const TypePtr* s_adr_type = kit->gvn().type(sptr)->is_ptr();
747 const TypePtr* d_adr_type = kit->gvn().type(dptr)->is_ptr();
748 uint s_alias_idx = Compile::current()->get_alias_index(s_adr_type);
749 uint d_alias_idx = Compile::current()->get_alias_index(d_adr_type);
750 // This copies the first 4 bytes after the compact header (hash field
751 // or first instance field) as a raw int. The actual field at this
752 // offset may be a narrowOop, so the load/store must be marked as
753 // mismatched to avoid StoreN-vs-StoreI assertion failures during IGVN.
754 Node* first = kit->gvn().transform(LoadNode::make(kit->gvn(), kit->control(), kit->memory(s_alias_idx),
755 sptr, s_adr_type, TypeInt::INT, T_INT,
756 MemNode::unordered, LoadNode::DependsOnlyOnTest,
757 false /*require_atomic_access*/, false /*unaligned*/,
758 true /*mismatched*/));
759 Node* st = kit->gvn().transform(StoreNode::make(kit->gvn(), kit->control(), kit->memory(d_alias_idx),
760 dptr, d_adr_type,
761 first, T_INT, MemNode::unordered));
762 st->as_Store()->set_mismatched_access();
763 kit->set_memory(st, d_alias_idx);
764 kit->record_for_igvn(st);
765 base_off += sizeof(jint);
766 guarantee(is_aligned(base_off, BytesPerLong), "must be 8-bytes aligned");
767 }
768
769 Node* payload_size = size;
770 Node* offset = kit->MakeConX(base_off);
771 payload_size = kit->gvn().transform(new SubXNode(payload_size, offset));
772 if (is_array) {
773 // Ensure the array payload size is rounded up to the next BytesPerLong
774 // multiple when converting to double-words. This is necessary because array
775 // size does not include object alignment padding, so it might not be a
776 // multiple of BytesPerLong for sub-long element types.
777 payload_size = kit->gvn().transform(new AddXNode(payload_size, kit->MakeConX(BytesPerLong - 1)));
778 }
779 payload_size = kit->gvn().transform(new URShiftXNode(payload_size, kit->intcon(LogBytesPerLong)));
780 ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, offset, dst_base, offset, payload_size, true, false);
781 if (is_array) {
782 ac->set_clone_array();
783 } else {
784 ac->set_clone_inst();
785 }
786 Node* n = kit->gvn().transform(ac);
787 if (n == ac) {
788 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
// Returns the previously created TypeFunc for the runtime clone call
// signature (consumed by clone_in_runtime()); asserts it was initialized.
inline const TypeFunc* BarrierSetC2::clone_type() {
  assert(BarrierSetC2::_clone_type_Type != nullptr, "should be initialized");
  return BarrierSetC2::_clone_type_Type;
}
884
885 #define XTOP LP64_ONLY(COMMA phase->top())
886
887 void BarrierSetC2::clone_in_runtime(PhaseMacroExpand* phase, ArrayCopyNode* ac,
888 address clone_addr, const char* clone_name) const {
889 Node* const ctrl = ac->in(TypeFunc::Control);
890 Node* const mem = ac->in(TypeFunc::Memory);
891 Node* const src = ac->in(ArrayCopyNode::Src);
892 Node* const dst = ac->in(ArrayCopyNode::Dest);
893 Node* const size = ac->in(ArrayCopyNode::Length);
894
895 assert(size->bottom_type()->base() == Type_X,
896 "Should be of object size type (int for 32 bits, long for 64 bits)");
897
898 // The native clone we are calling here expects the object size in words.
899 // Add header/offset size to payload size to get object size.
900 // Use the actual offset stored in the ArrayCopyNode (in bytes), not
901 // arraycopy_payload_base_offset(), because clone() may have bumped the
902 // offset past a 4-byte pre-copy for compact object headers.
903 Node* const base_offset = phase->transform_later(new URShiftXNode(ac->in(ArrayCopyNode::SrcPos), phase->intcon(LogBytesPerLong)));
904 Node* const full_size = phase->transform_later(new AddXNode(size, base_offset));
905 // HeapAccess<>::clone expects size in heap words.
906 // For 64-bits platforms, this is a no-operation.
907 // For 32-bits platforms, we need to multiply full_size by HeapWordsPerLong (2).
908 Node* const full_size_in_heap_words = phase->transform_later(new LShiftXNode(full_size, phase->intcon(LogHeapWordsPerLong)));
909
910 Node* const call = phase->make_leaf_call(ctrl,
911 mem,
912 clone_type(),
913 clone_addr,
914 clone_name,
915 TypeRawPtr::BOTTOM,
916 src, dst, full_size_in_heap_words XTOP);
917 phase->transform_later(call);
918 phase->igvn().replace_node(ac, call);
919 }
920
921 void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const {
922 Node* ctrl = ac->in(TypeFunc::Control);
923 Node* mem = ac->in(TypeFunc::Memory);
|