/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
#define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP

#include "oops/accessBackend.hpp"

#include "oops/access.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/orderAccess.hpp"

#include <type_traits>

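// decode()/encode() convert between oop and the in-heap oop representation
// (narrowOop when compressed oops are in use). The IS_NOT_NULL decorator
// selects the variants that skip the null check.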
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

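// The oop_* accessors translate the oop value to/from its encoded heap
// representation and delegate to the corresponding primitive accessor.
// The *_at variants first resolve (base, offset) to a field address.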
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store(void* addr, T value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = encode(value);
  store(reinterpret_cast<Encoded*>(addr), encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store_at(oop base, ptrdiff_t offset, T value) {
  oop_store(field_addr(base, offset), value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load(void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = load<Encoded>(reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load_at(oop base, ptrdiff_t offset) {
  return oop_load<T>(field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(void* addr, T compare_value, T new_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_compare = encode(compare_value);
  Encoded encoded_result = atomic_cmpxchg(reinterpret_cast<Encoded*>(addr),
                                          encoded_compare,
                                          encoded_new);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  return oop_atomic_xchg(field_addr(base, offset), new_value);
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  arraycopy(src_obj, src_offset_in_bytes, src_raw,
            dst_obj, dst_offset_in_bytes, dst_raw,
            length);
}

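// load_internal()/store_internal() implement the memory-ordered accesses
// selected by the MO_* decorators. The MO_SEQ_CST load additionally issues
// a leading fence on platforms that need IRIW support because they are not
// multiple-copy atomic.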
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return AtomicAccess::load(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  AtomicAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  AtomicAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  AtomicAccess::store(reinterpret_cast<volatile T*>(addr), value);
}

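// Atomic compare-and-exchange and exchange variants: MO_RELAXED maps to
// memory_order_relaxed, MO_SEQ_CST to the conservatively fenced
// memory_order_conservative ordering.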
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
  return AtomicAccess::cmpxchg(reinterpret_cast<volatile T*>(addr),
                               compare_value,
                               new_value,
                               memory_order_relaxed);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
  return AtomicAccess::cmpxchg(reinterpret_cast<volatile T*>(addr),
                               compare_value,
                               new_value,
                               memory_order_conservative);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_xchg_internal(void* addr, T new_value) {
  return AtomicAccess::xchg(reinterpret_cast<volatile T*>(addr),
                            new_value);
}

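// RawAccessBarrierArrayCopy dispatches arraycopy to the appropriate copy
// routine based on the decorators: oop vs. primitive elements, arrayof
// (element-aligned) copies, disjoint copies of heap-word-sized elements,
// and atomic vs. non-atomic element copies.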
class RawAccessBarrierArrayCopy: public AllStatic {
  template<typename T> struct IsHeapWordSized: public std::integral_constant<bool, sizeof(T) == HeapWordSize> { };
public:
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // We do not check for ARRAYCOPY_ATOMIC for oops, because oop copies are always atomic.
    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint_oops(src_raw, dst_raw, length);
    } else {
      typedef typename HeapOopType<decorators>::type OopType;
      AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src_raw),
                                              reinterpret_cast<OopType*>(dst_raw), length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_arrayof_conjoint(src_raw, dst_raw, length);
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // There is only a disjoint optimization for word granularity copying
    if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
      AccessInternal::arraycopy_disjoint_words_atomic(src_raw, dst_raw, length);
    } else {
      AccessInternal::arraycopy_disjoint_words(src_raw, dst_raw, length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    !HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint(src_raw, dst_raw, length);
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};

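// Type-erased (void) element copies can never take the heap-word-sized
// disjoint path; sizeof(void) is not valid, so force the trait to false.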
template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public std::false_type { };

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field. Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
  // of oops. We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark();
}

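// Copy the payload of a flat value object between two addresses. The
// payload size and alignment come from the InlineKlass for the given
// LayoutKind.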
template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
  assert(is_aligned(src, md->layout_alignment(lk)) && is_aligned(dst, md->layout_alignment(lk)), "Unaligned value_copy");
  AccessInternal::value_copy_internal(src, dst, static_cast<size_t>(md->layout_size_in_bytes(lk)));
}

#endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP