9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "runtime/sharedRuntime.hpp"
26 #include "utilities/align.hpp"
27 #include "utilities/byteswap.hpp"
28 #include "utilities/copy.hpp"
29
30
31 // Copy bytes; larger units are filled atomically if everything is aligned.
32 void Copy::conjoint_memory_atomic(const void* from, void* to, size_t size) {
33 uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;
34
35 // (Note: We could improve performance by ignoring the low bits of size,
36 // and putting a short cleanup loop after each bulk copy loop.
37 // There are plenty of other ways to make this faster also,
38 // and it's a slippery slope. For now, let's keep this code simple
39 // since the simplicity helps clarify the atomicity semantics of
40 // this operation. There are also CPU-specific assembly versions
41 // which may or may not want to include such optimizations.)
42
43 if (bits % sizeof(jlong) == 0) {
44 Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
45 } else if (bits % sizeof(jint) == 0) {
46 Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
47 } else if (bits % sizeof(jshort) == 0) {
48 Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
49 } else {
50 // Not aligned, so no need to be atomic.
51 Copy::conjoint_jbytes((const void*) from, (void*) to, size);
52 }
53 }
54
55 class CopySwap : AllStatic {
56 public:
57 /**
58 * Copy and optionally byte swap elements
59 *
60 * <swap> - true if elements should be byte swapped
61 *
62 * @param src address of source
63 * @param dst address of destination
64 * @param byte_count number of bytes to copy
65 * @param elem_size size of the elements to copy-swap
66 */
67 template<bool swap>
68 static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
69 assert(src != nullptr, "address must not be null");
70 assert(dst != nullptr, "address must not be null");
71 assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
72 "incorrect element size: %zu", elem_size);
73 assert(is_aligned(byte_count, elem_size),
74 "byte_count %zu must be multiple of element size %zu", byte_count, elem_size);
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "runtime/sharedRuntime.hpp"
26 #include "utilities/align.hpp"
27 #include "utilities/byteswap.hpp"
28 #include "utilities/copy.hpp"
29 #include "utilities/debug.hpp"
30
31
32 // Copy bytes; larger units are filled atomically if everything is aligned.
33 void Copy::conjoint_memory_atomic(const void* from, void* to, size_t size) {
34 uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;
35
36 // (Note: We could improve performance by ignoring the low bits of size,
37 // and putting a short cleanup loop after each bulk copy loop.
38 // There are plenty of other ways to make this faster also,
39 // and it's a slippery slope. For now, let's keep this code simple
40 // since the simplicity helps clarify the atomicity semantics of
41 // this operation. There are also CPU-specific assembly versions
42 // which may or may not want to include such optimizations.)
43
44 if (bits % sizeof(jlong) == 0) {
45 Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
46 } else if (bits % sizeof(jint) == 0) {
47 Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
48 } else if (bits % sizeof(jshort) == 0) {
49 Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
50 } else {
51 // Not aligned, so no need to be atomic.
52 Copy::conjoint_jbytes((const void*) from, (void*) to, size);
53 }
54 }
55
// Helper for Copy::copy_value_content below.  Expects 'bits' (the OR of the
// original from/to addresses), 'remaining_bytes', 'cursor_from' and
// 'cursor_to' to be in scope at the expansion site.  If 'bits' shows both
// addresses are aligned to sizeof(t), copies as many whole t-sized units as
// fit in 'remaining_bytes' atomically, then advances both cursors and shrinks
// the remaining byte count accordingly.  (Advancing by a multiple of a larger
// power-of-two unit preserves alignment for the smaller units tried next.)
#define COPY_ALIGNED_SEGMENT(t) \
  if (bits % sizeof(t) == 0) { \
    size_t segment = remaining_bytes / sizeof(t); \
    if (segment > 0) { \
      Copy::conjoint_##t##s_atomic((const t*)cursor_from, (t*)cursor_to, \
                                   segment); \
      remaining_bytes -= segment * sizeof(t); \
      cursor_from = (void*)(((char*)cursor_from) + segment * sizeof(t)); \
      cursor_to = (void*)(((char*)cursor_to) + segment * sizeof(t)); \
    } \
  }
67
// Copy 'size' bytes from 'from' to 'to', using the widest atomic copy unit
// the common alignment of the two addresses allows, widest first (jlong,
// jint, jshort), and a plain byte copy for whatever remains.
void Copy::copy_value_content(const void* from, void* to, size_t size) {
  // The alignment of the containing object must satisfy the alignment of all
  // its fields, as such we can safely copy atomically starting at the largest
  // atomic size which the payloads are aligned to. Any trailing payload smaller
  // than this atomic size must have a lower alignment requirement. This
  // property holds recursively down to the smallest atomic size.
  const uintptr_t bits = (uintptr_t)from | (uintptr_t)to;
  const void* cursor_from = from;
  void* cursor_to = to;
  size_t remaining_bytes = size;
  // Try each unit in decreasing size; each expansion advances the cursors
  // past the segment it copied, leaving a remainder smaller than that unit.
  COPY_ALIGNED_SEGMENT(jlong)
  COPY_ALIGNED_SEGMENT(jint)
  COPY_ALIGNED_SEGMENT(jshort)
  if (remaining_bytes > 0) {
    // At most one byte can remain here; byte copies need no atomicity.
    Copy::conjoint_jbytes(cursor_from, cursor_to, remaining_bytes);
  }
}
85
86 #undef COPY_ALIGNED_SEGMENT
87
88 template <typename T>
89 static void clear_value_content_helper(uintptr_t* to_cursor_addr, size_t* remaining_bytes_addr) {
90 uintptr_t& to_cursor = *to_cursor_addr;
91 size_t& remaining_bytes = *remaining_bytes_addr;
92
93 if (to_cursor % sizeof(T) == 0 && remaining_bytes >= sizeof(T)) {
94 const size_t copy_bytes = align_down(remaining_bytes, sizeof(T));
95 Copy::fill_to_memory_atomic((void*)to_cursor, copy_bytes);
96 to_cursor += copy_bytes;
97 remaining_bytes -= copy_bytes;
98 }
99 }
100
// Zero 'size' bytes at 'to', using the widest atomic fill unit the
// destination alignment allows, widest first (jlong, jint, jshort, jbyte).
void Copy::clear_value_content(void* to, size_t size) {
  // The alignment of the containing object must satisfy the alignment of all
  // its fields, as such we can safely copy atomically starting at the largest
  // atomic size which the payloads are aligned to. Any trailing payload smaller
  // than this atomic size must have a lower alignment requirement. This
  // property holds recursively down to the smallest atomic size.
  uintptr_t to_cursor = uintptr_t(to);
  size_t remaining_bytes = size;

  // Clear jlong-aligned segments
  clear_value_content_helper<jlong>(&to_cursor, &remaining_bytes);

  // Clear jint-aligned segments
  clear_value_content_helper<jint>(&to_cursor, &remaining_bytes);

  // Clear jshort-aligned segments
  clear_value_content_helper<jshort>(&to_cursor, &remaining_bytes);

  // Clear remaining bytes (jbyte is always aligned, so this drains the rest)
  clear_value_content_helper<jbyte>(&to_cursor, &remaining_bytes);

  // Everything was cleared and the cursor advanced by exactly 'size' bytes.
  postcond(remaining_bytes == 0);
  postcond(to_cursor - size == uintptr_t(to));
}
125
126 class CopySwap : AllStatic {
127 public:
128 /**
129 * Copy and optionally byte swap elements
130 *
131 * <swap> - true if elements should be byte swapped
132 *
133 * @param src address of source
134 * @param dst address of destination
135 * @param byte_count number of bytes to copy
136 * @param elem_size size of the elements to copy-swap
137 */
138 template<bool swap>
139 static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
140 assert(src != nullptr, "address must not be null");
141 assert(dst != nullptr, "address must not be null");
142 assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
143 "incorrect element size: %zu", elem_size);
144 assert(is_aligned(byte_count, elem_size),
145 "byte_count %zu must be multiple of element size %zu", byte_count, elem_size);
|