14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
27
28 #include "oops/accessBackend.hpp"
29
30 #include "cppstdlib/type_traits.hpp"
31 #include "oops/access.hpp"
32 #include "oops/arrayOop.hpp"
33 #include "oops/compressedOops.inline.hpp"
34 #include "oops/oopsHierarchy.hpp"
35 #include "runtime/atomicAccess.hpp"
36 #include "runtime/orderAccess.hpp"
37
38 template <DecoratorSet decorators>
39 template <DecoratorSet idecorators, typename T>
40 inline typename EnableIf<
41 AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
42 RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
43 if (HasDecorator<decorators, IS_NOT_NULL>::value) {
44 return CompressedOops::decode_not_null(value);
45 } else {
46 return CompressedOops::decode(value);
47 }
48 }
49
50 template <DecoratorSet decorators>
51 template <DecoratorSet idecorators, typename T>
52 inline typename EnableIf<
53 AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
54 typename HeapOopType<idecorators>::type>::type
106 return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
107 }
108
109 template <DecoratorSet decorators>
110 template <typename T>
111 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
112 typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
113 Encoded encoded_new = encode(new_value);
114 Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
115 return decode<T>(encoded_result);
116 }
117
118 template <DecoratorSet decorators>
119 template <typename T>
120 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
121 return oop_atomic_xchg(field_addr(base, offset), new_value);
122 }
123
124 template <DecoratorSet decorators>
125 template <typename T>
126 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
127 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
128 size_t length) {
129 return arraycopy(src_obj, src_offset_in_bytes, src_raw,
130 dst_obj, dst_offset_in_bytes, dst_raw,
131 length);
132 }
133
134 template <DecoratorSet decorators>
135 template <DecoratorSet ds, typename T>
136 inline typename EnableIf<
137 HasDecorator<ds, MO_SEQ_CST>::value, T>::type
138 RawAccessBarrier<decorators>::load_internal(void* addr) {
139 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
140 OrderAccess::fence();
141 }
142 return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
143 }
144
145 template <DecoratorSet decorators>
146 template <DecoratorSet ds, typename T>
147 inline typename EnableIf<
148 HasDecorator<ds, MO_ACQUIRE>::value, T>::type
149 RawAccessBarrier<decorators>::load_internal(void* addr) {
150 return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
151 }
// Element-atomic conjoint arraycopy for non-oop elements. Selected when
// ARRAYCOPY_ATOMIC is requested and no more specific variant applies:
// the value is not an oop, the copy is not a disjoint copy of
// heap-word-sized elements, and ARRAYCOPY_ARRAYOF is not set.
284 template <DecoratorSet decorators, typename T>
285 static inline typename EnableIf<
286 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
287 !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
288 !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
289 HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
290 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
291 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
292 size_t length) {
// Resolve the (array object, byte offset) pairs to raw element addresses.
// NOTE(review): presumably obj_offset_to_raw passes src_raw/dst_raw
// through unchanged when they are already resolved — confirm in arrayOop.hpp.
293 src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
294 dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
295
296 AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
297 }
298 };
299
// Explicit opt-out: T = void carries no size information, so it is never
// considered heap-word sized for arraycopy dispatch.
300 template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public std::false_type { };
301
302 template <DecoratorSet decorators>
303 template <typename T>
304 inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
305 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
306 size_t length) {
307 RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
308 dst_obj, dst_offset_in_bytes, dst_raw,
309 length);
310 return true;
311 }
312
313 template <DecoratorSet decorators>
314 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
315 // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
316 // is modifying a reference field in the clonee, a non-oop-atomic copy might
317 // be suspended in the middle of copying the pointer and end up with parts
318 // of two different pointers in the field. Subsequent dereferences will crash.
319 // 4846409: an oop-copy of objects with long or double fields or arrays of same
320 // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
321 // of oops. We know objects are aligned on a minimum of an jlong boundary.
322 // The same is true of StubRoutines::object_copy and the various oop_copy
323 // variants, and of the code generated by the inline_native_clone intrinsic.
324
325 assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
326 AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
327 reinterpret_cast<jlong*>((oopDesc*)dst),
328 align_object_size(size) / HeapWordsPerLong);
329 // Clear the header
330 dst->init_mark();
331 }
332
333 #endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
|
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
27
28 #include "oops/accessBackend.hpp"
29
30 #include "cppstdlib/type_traits.hpp"
31 #include "oops/access.hpp"
32 #include "oops/arrayOop.hpp"
33 #include "oops/compressedOops.inline.hpp"
34 #include "oops/inlineKlass.hpp"
35 #include "oops/layoutKind.hpp"
36 #include "oops/oopsHierarchy.hpp"
37 #include "oops/valuePayload.inline.hpp"
38 #include "runtime/atomicAccess.hpp"
39 #include "runtime/orderAccess.hpp"
40
41 template <DecoratorSet decorators>
42 template <DecoratorSet idecorators, typename T>
43 inline typename EnableIf<
44 AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
45 RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
46 if (HasDecorator<decorators, IS_NOT_NULL>::value) {
47 return CompressedOops::decode_not_null(value);
48 } else {
49 return CompressedOops::decode(value);
50 }
51 }
52
53 template <DecoratorSet decorators>
54 template <DecoratorSet idecorators, typename T>
55 inline typename EnableIf<
56 AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
57 typename HeapOopType<idecorators>::type>::type
109 return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
110 }
111
112 template <DecoratorSet decorators>
113 template <typename T>
114 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
115 typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
116 Encoded encoded_new = encode(new_value);
117 Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
118 return decode<T>(encoded_result);
119 }
120
121 template <DecoratorSet decorators>
122 template <typename T>
123 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
124 return oop_atomic_xchg(field_addr(base, offset), new_value);
125 }
126
127 template <DecoratorSet decorators>
128 template <typename T>
129 inline void RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
130 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
131 size_t length) {
132 arraycopy(src_obj, src_offset_in_bytes, src_raw,
133 dst_obj, dst_offset_in_bytes, dst_raw,
134 length);
135 }
136
137 template <DecoratorSet decorators>
138 template <DecoratorSet ds, typename T>
139 inline typename EnableIf<
140 HasDecorator<ds, MO_SEQ_CST>::value, T>::type
141 RawAccessBarrier<decorators>::load_internal(void* addr) {
142 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
143 OrderAccess::fence();
144 }
145 return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
146 }
147
148 template <DecoratorSet decorators>
149 template <DecoratorSet ds, typename T>
150 inline typename EnableIf<
151 HasDecorator<ds, MO_ACQUIRE>::value, T>::type
152 RawAccessBarrier<decorators>::load_internal(void* addr) {
153 return AtomicAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
154 }
// Element-atomic conjoint arraycopy for non-oop elements. Selected when
// ARRAYCOPY_ATOMIC is requested and no more specific variant applies:
// the value is not an oop, the copy is not a disjoint copy of
// heap-word-sized elements, and ARRAYCOPY_ARRAYOF is not set.
287 template <DecoratorSet decorators, typename T>
288 static inline typename EnableIf<
289 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
290 !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
291 !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
292 HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
293 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
294 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
295 size_t length) {
// Resolve the (array object, byte offset) pairs to raw element addresses.
// NOTE(review): presumably obj_offset_to_raw passes src_raw/dst_raw
// through unchanged when they are already resolved — confirm in arrayOop.hpp.
296 src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
297 dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
298
299 AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
300 }
301 };
302
// Explicit opt-out: T = void carries no size information, so it is never
// considered heap-word sized for arraycopy dispatch.
303 template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public std::false_type { };
304
305 template <DecoratorSet decorators>
306 template <typename T>
307 inline void RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
308 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
309 size_t length) {
310 RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
311 dst_obj, dst_offset_in_bytes, dst_raw,
312 length);
313 }
314
315 template <DecoratorSet decorators>
316 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
317 // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
318 // is modifying a reference field in the clonee, a non-oop-atomic copy might
319 // be suspended in the middle of copying the pointer and end up with parts
320 // of two different pointers in the field. Subsequent dereferences will crash.
321 // 4846409: an oop-copy of objects with long or double fields or arrays of same
322 // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
323 // of oops. We know objects are aligned on a minimum of an jlong boundary.
324 // The same is true of StubRoutines::object_copy and the various oop_copy
325 // variants, and of the code generated by the inline_native_clone intrinsic.
326
327 assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
328 AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
329 reinterpret_cast<jlong*>((oopDesc*)dst),
330 align_object_size(size) / HeapWordsPerLong);
331 // Clear the header
332 dst->init_mark();
333 }
334
335 template <DecoratorSet decorators>
336 inline void RawAccessBarrier<decorators>::value_copy(const ValuePayload& src, const ValuePayload& dst) {
337 precond(src.klass() == dst.klass());
338
339 const InlineKlass* klass = src.klass();
340 const LayoutKind copy_layout = LayoutKindHelper::get_copy_layout(
341 src.layout_kind(), dst.layout_kind());
342 const int size = klass->layout_size_in_bytes(copy_layout);
343
344 AccessInternal::value_copy_internal(src.addr(), dst.addr(),
345 static_cast<size_t>(size));
346 }
347
348 template <DecoratorSet decorators>
349 inline void RawAccessBarrier<decorators>::value_store_null(const ValuePayload& dst) {
350 address dst_addr = dst.addr();
351 const LayoutKind lk = dst.layout_kind();
352 const InlineKlass* klass = dst.klass();
353 const int size = klass->layout_size_in_bytes(lk);
354
355 AccessInternal::value_store_null(dst_addr, static_cast<size_t>(size));
356 }
357
358 #endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
|