 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
#define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP

#include "oops/accessBackend.hpp"

#include "oops/access.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
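
// Conversions between the on-heap (possibly compressed) oop representation and
// regular oops. The IS_NOT_NULL decorator selects the cheaper not-null variants
// of CompressedOops::decode/encode, which skip the null check.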
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
}
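
// oop_atomic_xchg encodes the new oop value into its on-heap representation,
// performs the primitive atomic exchange on that encoding, and decodes the
// previous value back into an oop.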
template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  return oop_atomic_xchg(field_addr(base, offset), new_value);
}

template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  return arraycopy(src_obj, src_offset_in_bytes, src_raw,
                   dst_obj, dst_offset_in_bytes, dst_raw,
                   length);
}
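
// MO_SEQ_CST loads issue a leading full fence on CPUs that are not
// multiple-copy-atomic (the IRIW case) before the acquiring load;
// MO_ACQUIRE loads only need the acquiring load itself.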
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

class RawAccessBarrierArrayCopy: public AllStatic {
  template <typename T> struct IsHeapWordSized: public IntegralConstant<bool, sizeof(T) == HeapWordSize> { };
public:
  // Only one overload is shown here: the element-wise atomic copy, selected when
  // ARRAYCOPY_ATOMIC is set, the elements are not oops, and neither the arrayof
  // nor the disjoint heap-word-sized variants apply.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};

template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public IntegralConstant<bool, false> { };

template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
  return true;
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field. Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
  // of oops. We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark();
}

#endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
#define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP

#include "oops/accessBackend.hpp"

#include "oops/access.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
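
// Conversions between the on-heap (possibly compressed) oop representation and
// regular oops. The IS_NOT_NULL decorator selects the cheaper not-null variants
// of CompressedOops::decode/encode, which skip the null check.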
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
}
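
// oop_atomic_xchg encodes the new oop value into its on-heap representation,
// performs the primitive atomic exchange on that encoding, and decodes the
// previous value back into an oop.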
template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  return oop_atomic_xchg(field_addr(base, offset), new_value);
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  arraycopy(src_obj, src_offset_in_bytes, src_raw,
            dst_obj, dst_offset_in_bytes, dst_raw,
            length);
}
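
// MO_SEQ_CST loads issue a leading full fence on CPUs that are not
// multiple-copy-atomic (the IRIW case) before the acquiring load;
// MO_ACQUIRE loads only need the acquiring load itself.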
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

class RawAccessBarrierArrayCopy: public AllStatic {
  template <typename T> struct IsHeapWordSized: public IntegralConstant<bool, sizeof(T) == HeapWordSize> { };
public:
  // Only one overload is shown here: the element-wise atomic copy, selected when
  // ARRAYCOPY_ATOMIC is set, the elements are not oops, and neither the arrayof
  // nor the disjoint heap-word-sized variants apply.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};

template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public IntegralConstant<bool, false> { };

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field. Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
  // of oops. We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark();
}
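
// Copies the flattened payload of an inline-type (value class) instance.
// Both addresses must satisfy the InlineKlass's required alignment, and the
// copy covers exactly the value's size in bytes, using an atomic conjoint copy.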
template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::value_copy(void* src, void* dst, InlineKlass* md) {
  assert(is_aligned(src, md->get_alignment()) && is_aligned(dst, md->get_alignment()), "Unaligned value_copy");
  AccessInternal::arraycopy_conjoint_atomic(src, dst, static_cast<size_t>(md->get_exact_size_in_bytes()));
}

#endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP