1 /*
  2  * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
 26 #define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
 27 
 28 #include "oops/accessBackend.hpp"
 29 
 30 #include "oops/access.hpp"
 31 #include "oops/arrayOop.hpp"
 32 #include "oops/compressedOops.inline.hpp"
 33 #include "oops/oopsHierarchy.hpp"
 34 #include "runtime/atomic.hpp"
 35 #include "runtime/orderAccess.hpp"
 36 #include "oops/inlineKlass.hpp"
 37 
 38 template <DecoratorSet decorators>
 39 template <DecoratorSet idecorators, typename T>
 40 inline typename EnableIf<
 41   AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
 42 RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
 43   if (HasDecorator<decorators, IS_NOT_NULL>::value) {
 44     return CompressedOops::decode_not_null(value);
 45   } else {
 46     return CompressedOops::decode(value);
 47   }
 48 }
 49 
 50 template <DecoratorSet decorators>
 51 template <DecoratorSet idecorators, typename T>
 52 inline typename EnableIf<
 53   AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
 54   typename HeapOopType<idecorators>::type>::type
 55 RawAccessBarrier<decorators>::encode_internal(T value) {
 56   if (HasDecorator<decorators, IS_NOT_NULL>::value) {
 57     return CompressedOops::encode_not_null(value);
 58   } else {
 59     return CompressedOops::encode(value);
 60   }
 61 }
 62 
 63 template <DecoratorSet decorators>
 64 template <typename T>
 65 inline void RawAccessBarrier<decorators>::oop_store(void* addr, T value) {
 66   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
 67   Encoded encoded = encode(value);
 68   store(reinterpret_cast<Encoded*>(addr), encoded);
 69 }
 70 
 71 template <DecoratorSet decorators>
 72 template <typename T>
 73 inline void RawAccessBarrier<decorators>::oop_store_at(oop base, ptrdiff_t offset, T value) {
 74   oop_store(field_addr(base, offset), value);
 75 }
 76 
 77 template <DecoratorSet decorators>
 78 template <typename T>
 79 inline T RawAccessBarrier<decorators>::oop_load(void* addr) {
 80   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
 81   Encoded encoded = load<Encoded>(reinterpret_cast<Encoded*>(addr));
 82   return decode<T>(encoded);
 83 }
 84 
 85 template <DecoratorSet decorators>
 86 template <typename T>
 87 inline T RawAccessBarrier<decorators>::oop_load_at(oop base, ptrdiff_t offset) {
 88   return oop_load<T>(field_addr(base, offset));
 89 }
 90 
 91 template <DecoratorSet decorators>
 92 template <typename T>
 93 inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 94   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
 95   Encoded encoded_new = encode(new_value);
 96   Encoded encoded_compare = encode(compare_value);
 97   Encoded encoded_result = atomic_cmpxchg(reinterpret_cast<Encoded*>(addr),
 98                                           encoded_compare,
 99                                           encoded_new);
100   return decode<T>(encoded_result);
101 }
102 
103 template <DecoratorSet decorators>
104 template <typename T>
105 inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
106   return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
107 }
108 
109 template <DecoratorSet decorators>
110 template <typename T>
111 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
112   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
113   Encoded encoded_new = encode(new_value);
114   Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
115   return decode<T>(encoded_result);
116 }
117 
118 template <DecoratorSet decorators>
119 template <typename T>
120 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
121   return oop_atomic_xchg(field_addr(base, offset), new_value);
122 }
123 
template <DecoratorSet decorators>
template <typename T>
// Oop array copy is a pure forwarder: the INTERNAL_VALUE_IS_OOP decorator
// (carried in `decorators`) makes the generic arraycopy below dispatch to
// the oop-specific routines.
inline void RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  arraycopy(src_obj, src_offset_in_bytes, src_raw,
            dst_obj, dst_offset_in_bytes, dst_raw,
            length);
}
133 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  // Sequentially consistent load. On CPUs that are not multiple-copy-atomic,
  // a full fence before the acquire load is additionally required to give
  // IRIW (independent-reads-of-independent-writes) ordering.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
144 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  // Acquire load: subsequent accesses may not be reordered before it.
  return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
152 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  // Relaxed atomic load: atomicity only, no ordering guarantees.
  return Atomic::load(reinterpret_cast<const volatile T*>(addr));
}
160 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  // Sequentially consistent store: release store followed by a full fence.
  Atomic::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}
168 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  // Release store: prior accesses may not be reordered after it.
  Atomic::release_store(reinterpret_cast<volatile T*>(addr), value);
}
176 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  // Relaxed atomic store: atomicity only, no ordering guarantees.
  Atomic::store(reinterpret_cast<volatile T*>(addr), value);
}
184 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
  // Relaxed CAS: no ordering beyond the atomicity of the exchange itself.
  // Returns the value observed at addr (== compare_value on success).
  return Atomic::cmpxchg(reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         new_value,
                         memory_order_relaxed);
}
195 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
  // Conservatively ordered CAS (HotSpot's strongest ordering, used for
  // MO_SEQ_CST). Returns the value observed at addr.
  return Atomic::cmpxchg(reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         new_value,
                         memory_order_conservative);
}
206 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_xchg_internal(void* addr, T new_value) {
  // Atomic exchange (Atomic::xchg is conservatively ordered by default);
  // returns the previous value at addr.
  return Atomic::xchg(reinterpret_cast<volatile T*>(addr),
                      new_value);
}
215 
216 // For platforms that do not have native support for wide atomics,
217 // we can emulate the atomicity using a lock. So here we check
218 // whether that is necessary or not.
219 
template <DecoratorSet ds>
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_xchg_maybe_locked(void* addr, T new_value) {
  // Exchange for types whose width may exceed the platform's native atomic
  // width. Uses the native exchange when available, otherwise emulates
  // atomicity with the global access lock.
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_xchg_internal<ds>(addr, new_value);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    // Load-then-store is atomic with respect to other lock holders only.
    T old_val = RawAccess<>::load(p);
    RawAccess<>::store(p, new_value);
    return old_val;
  }
}
235 
template <DecoratorSet ds>
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
  // CAS for types whose width may exceed the platform's native atomic
  // width. Uses the native CAS when available, otherwise emulates it under
  // the global access lock. Returns the value observed at addr.
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    if (old_val == compare_value) {
      RawAccess<>::store(p, new_value);
    }
    return old_val;
  }
}
253 
// Dispatches a raw arraycopy to the most appropriate specialized copy
// routine, selected at compile time from the decorators and element type.
// Either the (obj, offset) pair or the raw pointer identifies each range;
// obj_offset_to_raw normalizes both forms to a raw pointer.
class RawAccessBarrierArrayCopy: public AllStatic {
  // True iff T is exactly one heap word wide — the granularity required by
  // the disjoint-word copy routines.
  template<typename T> struct IsHeapWordSized: public IntegralConstant<bool, sizeof(T) == HeapWordSize> { };
public:
  // Oop elements: copy via the oop-aware routines.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint_oops(src_raw, dst_raw, length);
    } else {
      typedef typename HeapOopType<decorators>::type OopType;
      AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src_raw),
                                              reinterpret_cast<OopType*>(dst_raw), length);
    }
  }

  // Non-oop elements, ARRAYCOPY_ARRAYOF: element-wise array-of copy.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_arrayof_conjoint(src_raw, dst_raw, length);
  }

  // Non-oop, heap-word-sized elements, disjoint ranges: word copy,
  // atomically per word if ARRAYCOPY_ATOMIC is requested.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // There is only a disjoint optimization for word granularity copying
    if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
      AccessInternal::arraycopy_disjoint_words_atomic(src_raw, dst_raw, length);
    } else {
      AccessInternal::arraycopy_disjoint_words(src_raw, dst_raw, length);
    }
  }

  // Fallback for non-oop elements with no applicable specialization:
  // plain conjoint copy.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    !HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint(src_raw, dst_raw, length);
  }

  // Non-oop elements with ARRAYCOPY_ATOMIC (and no other applicable
  // specialization): element-wise atomic conjoint copy.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};
337 
// void has no size, so sizeof(T) cannot be evaluated; explicitly mark
// void as not heap-word-sized to keep the dispatch well-formed for T = void.
template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public IntegralConstant<bool, false> { };
339 
template <DecoratorSet decorators>
template <typename T>
// Forwards to the RawAccessBarrierArrayCopy dispatcher, which selects the
// concrete copy routine from the decorators and element type.
inline void RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}
349 
template <DecoratorSet decorators>
// Copies the contents of src into dst (size in heap words), then
// reinitializes dst's mark word. The copy is done in jlong units so that
// embedded oops and 64-bit primitives are each copied atomically.
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field.  Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
  // of oops.  We know objects are aligned on a minimum of an jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark();
}
369 
370 template <DecoratorSet decorators>
371 inline void RawAccessBarrier<decorators>::value_copy(void* src, void* dst, InlineKlass* md) {
372   assert(is_aligned(src, md->get_alignment()) && is_aligned(dst, md->get_alignment()), "Unalign value_copy");
373   AccessInternal::arraycopy_conjoint_atomic(src, dst, static_cast<size_t>(md->get_exact_size_in_bytes()));
374 }
375 #endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP