 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
#define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP

#include "oops/accessBackend.hpp"

#include "cppstdlib/type_traits.hpp"
#include "oops/access.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/orderAccess.hpp"

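// Decode a compressed (narrow) oop into a regular oop. With the IS_NOT_NULL
// decorator the caller guarantees a non-null value, so the cheaper
// decode_not_null() can be used; otherwise decode() must also handle null.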
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
}

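// Atomic exchange of an oop field: the new value is encoded (possibly to a
// narrowOop), swapped with a plain atomic_xchg on the encoded representation,
// and the previous value is decoded back before being returned.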
template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  return oop_atomic_xchg(field_addr(base, offset), new_value);
}

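// For the raw backend an oop arraycopy needs no GC barriers; it simply
// forwards to the primitive arraycopy below. GC-specific access backends
// layered above this one apply whatever barriers the collector requires.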
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  arraycopy(src_obj, src_offset_in_bytes, src_raw,
            dst_obj, dst_offset_in_bytes, dst_raw,
            length);
}

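// MO_SEQ_CST loads: on CPUs that are not multiple-copy-atomic, a full fence
// before the load is needed so that independent writes by different threads
// are observed in a single global order (IRIW); the load itself then only
// needs acquire semantics.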
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
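
  // One of the SFINAE-selected overloads of RawAccessBarrierArrayCopy::arraycopy:
  // this variant handles ARRAYCOPY_ATOMIC copies that are neither oop copies,
  // HeapWord-sized disjoint copies, nor arrayof copies.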
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

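    // Copy element by element so that concurrent readers never observe a torn
    // element, even though the copy as a whole is not atomic.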
    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};

template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public std::false_type { };

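// Entry point for primitive arraycopy: dispatches to the overload of
// RawAccessBarrierArrayCopy::arraycopy selected by the decorator set.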
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field. Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
  // of oops. We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Reinitialize the mark word of the copy
  dst->init_mark();
}

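// Copy the flat payload of an inline type value. The InlineKlass supplies the
// size and alignment of the given layout kind; both addresses must already be
// suitably aligned.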
template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
  assert(is_aligned(src, md->layout_alignment(lk)) && is_aligned(dst, md->layout_alignment(lk)), "Unaligned value_copy");
  AccessInternal::value_copy_internal(src, dst, static_cast<size_t>(md->layout_size_in_bytes(lk)));
}

#endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP