/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
#define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP

#include "oops/accessBackend.hpp"

#include "cppstdlib/type_traits.hpp"
#include "oops/access.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/orderAccess.hpp"

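// The RawAccessBarrier members below are the innermost accessors of the
// Access API (see oops/access.hpp): once all higher-level barriers have been
// resolved, they perform the actual memory operation with the requested
// memory ordering.
//
// decode_internal()/encode_internal() convert between oop and the in-heap
// representation (narrowOop) when the decorators and value type require a
// compressed oop conversion. With IS_NOT_NULL the cheaper not-null variants
// of CompressedOops are used; otherwise the null-tolerant variants.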
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

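// The oop_* accessors wrap the corresponding raw accessor with an
// encode()/decode() step, so that oop values are stored in and loaded from
// their in-heap representation (possibly a compressed narrowOop).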
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store(void* addr, T value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = encode(value);
  store(reinterpret_cast<Encoded*>(addr), encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store_at(oop base, ptrdiff_t offset, T value) {
  oop_store(field_addr(base, offset), value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load(void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = load<Encoded>(reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load_at(oop base, ptrdiff_t offset) {
  return oop_load<T>(field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(void* addr, T compare_value, T new_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_compare = encode(compare_value);
  Encoded encoded_result = atomic_cmpxchg(reinterpret_cast<Encoded*>(addr),
                                          encoded_compare,
                                          encoded_new);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  return oop_atomic_xchg(field_addr(base, offset), new_value);
}

template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  return arraycopy(src_obj, src_offset_in_bytes, src_raw,
                   dst_obj, dst_offset_in_bytes, dst_raw,
                   length);
}

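// load_internal() is overloaded on the memory ordering decorator:
//  - MO_SEQ_CST: load_acquire, preceded by a full fence on CPUs that are not
//    multiple-copy-atomic (support_IRIW_for_not_multiple_copy_atomic_cpu), to
//    preserve IRIW ordering for sequentially consistent loads.
//  - MO_ACQUIRE: load_acquire.
//  - MO_RELAXED: plain atomic load.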
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return AtomicAccess::load(reinterpret_cast<const volatile T*>(addr));
}

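// store_internal() is overloaded on the memory ordering decorator:
//  - MO_SEQ_CST: release_store followed by a fence.
//  - MO_RELEASE: release_store.
//  - MO_RELAXED: plain atomic store.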
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  AtomicAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  AtomicAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  AtomicAccess::store(reinterpret_cast<volatile T*>(addr), value);
}

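// atomic_cmpxchg_internal() maps the access decorators onto AtomicAccess
// orderings: MO_RELAXED uses memory_order_relaxed, while MO_SEQ_CST uses the
// conservative (two-way fencing) ordering.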
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
  return AtomicAccess::cmpxchg(reinterpret_cast<volatile T*>(addr),
                               compare_value,
                               new_value,
                               memory_order_relaxed);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
  return AtomicAccess::cmpxchg(reinterpret_cast<volatile T*>(addr),
                               compare_value,
                               new_value,
                               memory_order_conservative);
}

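// Only the MO_SEQ_CST variant of atomic_xchg_internal() is defined here; it
// relies on AtomicAccess::xchg's default (conservative) ordering.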
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_xchg_internal(void* addr, T new_value) {
  return AtomicAccess::xchg(reinterpret_cast<volatile T*>(addr),
                            new_value);
}

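// RawAccessBarrierArrayCopy dispatches the raw array copy on the decorators
// and element type:
//  - oop element copies (INTERNAL_VALUE_IS_OOP) are always element-atomic,
//    using either the arrayof or the plain conjoint oop routine;
//  - ARRAYCOPY_ARRAYOF primitive copies use the arrayof conjoint routine;
//  - ARRAYCOPY_DISJOINT copies of HeapWord-sized elements use the disjoint
//    word routines (atomic or not);
//  - everything else falls back to conjoint copies (atomic or not).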
class RawAccessBarrierArrayCopy: public AllStatic {
  template<typename T> struct IsHeapWordSized: public std::integral_constant<bool, sizeof(T) == HeapWordSize> { };
public:
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint_oops(src_raw, dst_raw, length);
    } else {
      typedef typename HeapOopType<decorators>::type OopType;
      AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src_raw),
                                              reinterpret_cast<OopType*>(dst_raw), length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_arrayof_conjoint(src_raw, dst_raw, length);
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // There is only a disjoint optimization for word granularity copying
    if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
      AccessInternal::arraycopy_disjoint_words_atomic(src_raw, dst_raw, length);
    } else {
      AccessInternal::arraycopy_disjoint_words(src_raw, dst_raw, length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    !HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint(src_raw, dst_raw, length);
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};

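// sizeof(void) is not defined, so type-erased (void) copies must never take
// the HeapWord-sized path; this specialization keeps the primary template
// from being instantiated for T = void.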
template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public std::false_type { };

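// Entry point used by the Access framework; delegates to the dispatcher above
// and reports success (raw array copies cannot fail).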
template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
  return true;
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field. Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
  // of oops. We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark();
}

#endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP