 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
#define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP

#include "oops/accessBackend.hpp"

#include "oops/access.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"

#include <type_traits>

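// Decode a compressed heap oop into a regular oop. This overload is only
// enabled when the decorators require a narrowOop -> oop conversion; with
// IS_NOT_NULL set, the null check in the decode path can be elided.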
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

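// Encode a regular oop into its compressed heap representation; the mirror
// image of decode_internal above.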
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
}

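// Atomic exchange of an oop field: encode the new value to its in-heap
// representation, swap it in, and decode the previous (encoded) value
// before returning it.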
template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  return oop_atomic_xchg(field_addr(base, offset), new_value);
}

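// Arraycopy of oop elements: forwards to the generic arraycopy below, which
// dispatches on the ARRAYCOPY_* decorators.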
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  arraycopy(src_obj, src_offset_in_bytes, src_raw,
            dst_obj, dst_offset_in_bytes, dst_raw,
            length);
}

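// A seq_cst load is implemented as a load_acquire; on CPUs that are not
// multiple-copy-atomic, a leading full fence is required as well, so that
// independent writes are observed in a single global order (IRIW).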
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
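
// Helper that selects an arraycopy implementation based on the ARRAYCOPY_*
// decorators and the element type.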
class RawAccessBarrierArrayCopy: public AllStatic {
  template <typename T> struct IsHeapWordSized: public std::integral_constant<bool, sizeof(T) == HeapWordSize> { };
public:
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};

template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public std::false_type { };

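// Generic arraycopy entry point: defers to the decorator-dispatched helper
// above.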
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field. Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
  // of oops. We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark();
}

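// Copy the flat payload of a value object; size and alignment come from the
// InlineKlass layout information for the given LayoutKind.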
template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
  assert(is_aligned(src, md->layout_alignment(lk)) && is_aligned(dst, md->layout_alignment(lk)), "Unaligned value_copy");
  AccessInternal::value_copy_internal(src, dst, static_cast<size_t>(md->layout_size_in_bytes(lk)));
}
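
// Illustrative sketch (not part of this file): these internals are reached
// through the Access API wrappers declared in oops/access.hpp, e.g. a raw
// field load such as
//
//   oop element = RawAccess<MO_ACQUIRE>::oop_load_at(holder, offset);
//
// expands its decorators and bottoms out in the load_internal/decode_internal
// overloads defined above.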

#endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP