  // (Note: We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope. For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation. There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)

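  // 'bits' (computed above) is the bitwise OR of the source address, the
  // destination address, and the size, so 'bits % sizeof(T) == 0' holds only
  // when all three are multiples of sizeof(T), i.e. when every copied T
  // element is naturally aligned.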
  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
  } else {
    // No element size fits the combined alignment of 'from', 'to', and
    // 'size', so no atomicity guarantee applies and a byte copy suffices.
    Copy::conjoint_jbytes((const void*) from, (void*) to, size);
  }
}

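// Helper for copy_value_content() below: if 'bits' shows that both cursors
// are aligned for element type 't', atomically copy as many whole 't'
// elements as fit in 'remain', then advance both cursors past the copied
// segment. Relies on 'bits', 'remain', 'cursor_from', and 'cursor_to' being
// defined in the enclosing scope.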
#define COPY_ALIGNED_SEGMENT(t)                                                      \
  if (bits % sizeof(t) == 0) {                                                       \
    size_t segment = remain / sizeof(t);                                             \
    if (segment > 0) {                                                               \
      Copy::conjoint_##t##s_atomic((const t*) cursor_from, (t*) cursor_to, segment); \
      remain -= segment * sizeof(t);                                                 \
      cursor_from = (void*)(((char*)cursor_from) + segment * sizeof(t));             \
      cursor_to = (void*)(((char*)cursor_to) + segment * sizeof(t));                 \
    }                                                                                \
  }

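// Copy 'size' bytes from 'from' to 'to', keeping the copy of each naturally
// aligned element atomic. For example, with both pointers 8-byte aligned and
// size == 15 (odd, so none of the simple cases apply), the segment sequence
// copies one jlong, then one jint, then one jshort, and the final byte copy
// moves the single remaining byte.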
void Copy::copy_value_content(const void* from, void* to, size_t size) {
  // Simple cases first: the addresses and the size share a common alignment,
  // so a single bulk copy at that granularity covers the whole region.
  uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
    return;
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
    return;
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
    return;
  }

  // Complex cases: no single element size fits all of 'from', 'to', and
  // 'size'. Peel off the widest segments the address alignment allows,
  // narrowing step by step, and byte-copy whatever remains.
  bits = (uintptr_t) from | (uintptr_t) to;
  const void* cursor_from = from;
  void* cursor_to = to;
  size_t remain = size;
  COPY_ALIGNED_SEGMENT(jlong)
  COPY_ALIGNED_SEGMENT(jint)
  COPY_ALIGNED_SEGMENT(jshort)
  if (remain > 0) {
    Copy::conjoint_jbytes((const void*) cursor_from, (void*) cursor_to, remain);
  }
}

#undef COPY_ALIGNED_SEGMENT

class CopySwap : AllStatic {
public:
  /**
   * Copy and optionally byte swap elements
   *
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template<bool swap>
  static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    assert(src != nullptr, "address must not be null");
    assert(dst != nullptr, "address must not be null");
    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
           "incorrect element size: %zu", elem_size);
    assert(is_aligned(byte_count, elem_size),
           "byte_count %zu must be multiple of element size %zu", byte_count, elem_size);