  // (Note: We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope.  For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation.  There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)
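  //
  // (Illustrative sketch only, not compiled: one way to apply the "cleanup
  //  loop" idea above, assuming both addresses are jlong-aligned, would be:
  //
  //    size_t bulk = size & ~(sizeof(jlong) - 1);
  //    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, bulk / sizeof(jlong));
  //    Copy::conjoint_jbytes((const char*) from + bulk, (char*) to + bulk, size - bulk);
  //
  //  The shared code below deliberately keeps the simpler alignment checks.)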

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    Copy::conjoint_jbytes((const void*) from, (void*) to, size);
  }
}

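// Helper for copy_value_content below: when the source and destination
// addresses are aligned for element type t, atomically copy as many whole
// t-sized elements as fit in 'remain', then advance both cursors and shrink
// 'remain' accordingly.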
#define COPY_ALIGNED_SEGMENT(t) \
  if (bits % sizeof(t) == 0) { \
    size_t segment = remain / sizeof(t); \
    if (segment > 0) { \
      Copy::conjoint_##t##s_atomic((const t*) cursor_from, (t*) cursor_to, segment); \
      remain -= segment * sizeof(t); \
      cursor_from = (void*)(((char*)cursor_from) + segment * sizeof(t)); \
      cursor_to = (void*)(((char*)cursor_to) + segment * sizeof(t)); \
    } \
  } \

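// Copy 'size' bytes from 'from' to 'to', using the widest atomic element copy
// that the alignment of both addresses permits. If 'size' is not a multiple of
// that element size, the remainder is copied in successively narrower aligned
// segments, ending with a plain byte copy for any leftover tail.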
void Copy::copy_value_content(const void* from, void* to, size_t size) {
  // Simple cases first: from, to and size all share the alignment of a single
  // element type, so one bulk atomic copy covers everything.
  uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
    return;
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
    return;
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
    return;
  }

  // Complex cases: only the addresses are considered for alignment here; the
  // size is handled segment by segment, from the widest element type down.
  bits = (uintptr_t) from | (uintptr_t) to;
  const void* cursor_from = from;
  void* cursor_to = to;
  size_t remain = size;
  COPY_ALIGNED_SEGMENT(jlong)
  COPY_ALIGNED_SEGMENT(jint)
  COPY_ALIGNED_SEGMENT(jshort)
  if (remain > 0) {
    Copy::conjoint_jbytes((const void*) cursor_from, (void*) cursor_to, remain);
  }
}

#undef COPY_ALIGNED_SEGMENT

class CopySwap : AllStatic {
public:
  /**
   * Copy and optionally byte swap elements
   *
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
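   *
   * For example (illustrative only), conjoint_swap_if_needed<true>(src, dst, 4 * n, 4)
   * copies n four-byte elements from src to dst, byte swapping each element.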
   */
  template<bool swap>
  static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    assert(src != nullptr, "address must not be null");
    assert(dst != nullptr, "address must not be null");
    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
           "incorrect element size: " SIZE_FORMAT, elem_size);
    assert(is_aligned(byte_count, elem_size),
           "byte_count " SIZE_FORMAT " must be multiple of element size " SIZE_FORMAT, byte_count, elem_size);