13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_HPP
27
28 #include "cppstdlib/type_traits.hpp"
29 #include "gc/shared/barrierSetConfig.hpp"
30 #include "memory/allocation.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "oops/accessDecorators.hpp"
33 #include "oops/oopsHierarchy.hpp"
34 #include "runtime/globals.hpp"
35 #include "utilities/debug.hpp"
36 #include "utilities/globalDefinitions.hpp"
37
// Result from oop_arraycopy
enum class OopCopyResult {
  ok,                       // oop array copy successful
  failed_check_class_cast,  // oop array copy failed subtype check (ARRAYCOPY_CHECKCAST)
  failed_check_null         // oop array copy failed null check (ARRAYCOPY_NOTNULL)
};
44
// This metafunction returns either oop or narrowOop depending on whether
// an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compression is needed only when the access both converts compressed oops
  // (INTERNAL_CONVERT_COMPRESSED_OOP) and the runtime actually uses them
  // (INTERNAL_RT_USE_COMPRESSED_OOPS).
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
53
namespace AccessInternal {
  // Enumerates the access primitives that can be dispatched to a GC barrier.
  // The *_AT variants address a field as (base oop + byte offset) rather than
  // through a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE
  };

  // True iff the access traffics in T == oop values while the in-heap
  // representation (HeapOopType) is narrowOop, i.e. a compress/decompress
  // conversion is required at the access boundary.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };

  // Computes the address of an oop field at the given byte offset from base,
  // typed as oop* or narrowOop* according to the decorators.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // Function pointer signatures for each barrier primitive. These are the
  // types of the entries in the RuntimeDispatch tables below.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                              size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
  };

  // Type-erased accesses (T == void) only support arraycopy.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                              size_t length);
  };

  // Maps a BarrierType to its function pointer type from AccessFunctionTypes;
  // specializations are generated by the macro below.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolves the barrier function for a primitive access (defined elsewhere).
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  // Resolves the barrier function for an oop access (defined elsewhere).
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // Computes base + offset as a raw address (defined in the cpp file).
  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}
158
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
// (Applied as `decorators & RAW_DECORATOR_MASK` before instantiating RawAccessBarrier.)
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
163
// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  // Resolves (base oop + byte offset) to a raw field address.
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_internal<decorators>(addr, new_value);
  }

  // NOTE(review): returns bool while the runtime-dispatched arraycopy_func_t
  // returns OopCopyResult — confirm how the raw (hardwired) path converts.
  template <typename T>
  static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop_* variants handle the compressed-oop representation as well
  // (see the class comment above).
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  // *_at variants address the field as (base oop + byte offset).
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);
};
347
namespace AccessInternal {
  // Debug-only thread-state verification for accesses (defined in the cpp
  // file); compiles away in product builds via DEBUG_ONLY.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
352
353 // Below is the implementation of the first 4 steps of the template pipeline:
354 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
355 // and sets default decorators to sensible values.
356 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
357 // multiple types. The P type of the address and T type of the value must
358 // match.
359 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
360 // avoided, and in that case avoids it (calling raw accesses or
361 // primitive accesses in a build that does not require primitive GC barriers)
362 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
363 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
364 // to the access.
365
522 assert_access_thread_state();
523 return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
524 dst_obj, dst_offset_in_bytes, dst_raw,
525 length);
526 }
527 };
528
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    // Dispatch pointer; initialized to clone_init (the resolving function),
    // see the initializations below.
    static func_t _clone_func;

    // Resolving function used on the first call (defined out of line).
    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      // Debug-only thread-state check (see AccessInternal::check_access_thread_state).
      assert_access_thread_state();
      _clone_func(src, dst, size);
    }
  };
541
  // Initialize the function pointers to point to the resolving function.
  // Each *_init presumably resolves the barrier appropriate for the decorators
  // on first use; until then the dispatch entry points at the resolver itself.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
582
  // Step 3: Pre-runtime dispatching.
  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
  // not possible.
  struct PreRuntimeDispatch: AllStatic {
    // True when a raw access can be emitted directly: either the value is not
    // an oop, or the compressed-oops layout is statically known.
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public std::integral_constant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    // A non-oop value access never needs a runtime-resolved GC barrier here.
    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }
      return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                          dst_obj, dst_offset_in_bytes, dst_raw,
                                                                          length);
    }
  }

    // AS_RAW: hardwire clone to the raw barrier, masking away decorators
    // irrelevant at the raw layer.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      Raw::clone(src, dst, size);
    }

    // Otherwise: go through the runtime dispatch table.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
    }
  };
900
901 // Step 2: Reduce types.
902 // Enforce that for non-oop types, T and P have to be strictly the same.
903 // P is the type of the address and T is the type of the values.
  // As for oop types, it is allowed to send T in {narrowOop, oop} and
905 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
906 // the subsequent table. (columns are P, rows are T)
907 // | | HeapWord | oop | narrowOop |
908 // | oop | rt-comp | hw-none | hw-comp |
909 // | narrowOop | x | x | hw-none |
910 //
911 // x means not allowed
912 // rt-comp means it must be checked at runtime whether the oop is compressed.
913 // hw-none means it is statically known the oop will not be compressed.
914 // hw-comp means it is statically known the oop will be compressed.
915
916 template <DecoratorSet decorators, typename T>
917 inline void store_reduce_types(T* addr, T value) {
918 PreRuntimeDispatch::store<decorators>(addr, value);
  // Entry point for arraycopy: verifies the element type is supported,
  // forces the IS_ARRAY | IN_HEAP decorators (plus decorator fixup), then
  // hands off to the type-reduction layer.
  template <DecoratorSet decorators, typename T>
  inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                                 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                 size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                    std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
    using DecayedT = std::decay_t<T>;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
    // const_cast: the source is taken as const T* for caller convenience, but
    // the reduced-type layer takes non-const pointers.
    return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                       dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                       length);
  }
1184
  // Entry point for clone: applies decorator fixup, then hands off to
  // pre-runtime dispatch.
  template <DecoratorSet decorators>
  inline void clone(oop src, oop dst, size_t size) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
  }
1190
1191 // Infer the type that should be returned from an Access::oop_load.
1192 template <typename P, DecoratorSet decorators>
1193 class OopLoadProxy: public StackObj {
1194 private:
1195 P *const _addr;
1196 public:
1197 explicit OopLoadProxy(P* addr) : _addr(addr) {}
1198
1199 inline operator oop() {
1200 return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1201 }
1202
1203 inline operator narrowOop() {
1204 return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1205 }
1206
1207 template <typename T>
1208 inline bool operator ==(const T& other) const {
1209 return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1210 }
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_HPP
27
28 #include "cppstdlib/type_traits.hpp"
29 #include "gc/shared/barrierSetConfig.hpp"
30 #include "memory/allocation.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "oops/accessDecorators.hpp"
33 #include "oops/inlineKlass.hpp"
34 #include "oops/oopsHierarchy.hpp"
35 #include "runtime/globals.hpp"
36 #include "utilities/debug.hpp"
37 #include "utilities/globalDefinitions.hpp"
38
// Result from oop_arraycopy
enum class OopCopyResult {
  ok,                       // oop array copy successful
  failed_check_class_cast,  // oop array copy failed subtype check (ARRAYCOPY_CHECKCAST)
  failed_check_null         // oop array copy failed null check (ARRAYCOPY_NOTNULL)
};
45
// This metafunction returns either oop or narrowOop depending on whether
// an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compression is needed only when the access both converts compressed oops
  // and the runtime actually uses them.
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
54
// This meta-function returns either oop or narrowOop depending on whether
// a back-end needs to consider compressed oops types or not.
template <DecoratorSet decorators>
struct ValueOopType: AllStatic {
  // Unlike HeapOopType, only the runtime use of compressed oops matters here;
  // there is no INTERNAL_CONVERT_COMPRESSED_OOP check.
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
62
namespace AccessInternal {
  // Enumerates the access primitives that can be dispatched to a GC barrier.
  // The *_AT variants address a field as (base oop + byte offset) rather than
  // through a raw address. The BARRIER_VALUE_* entries operate on ValuePayload
  // (see value_copy_func_t / value_store_null_func_t below).
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_VALUE_COPY,
    BARRIER_VALUE_STORE_NULL,
  };

  // True iff the access traffics in T == oop values while the in-heap
  // representation (HeapOopType) is narrowOop, i.e. a compress/decompress
  // conversion is required at the access boundary.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };

  // Computes the address of an oop field at the given byte offset from base,
  // typed as oop* or narrowOop* according to the decorators.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // Function pointer signatures for each barrier primitive. These are the
  // types of the entries in the RuntimeDispatch tables below.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                              size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef void (*value_copy_func_t)(const ValuePayload& src, const ValuePayload& dst);
    typedef void (*value_store_null_func_t)(const ValuePayload& dst);
  };

  // Type-erased accesses (T == void) only support arraycopy.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                              size_t length);
  };

  // Maps a BarrierType to its function pointer type from AccessFunctionTypes;
  // specializations are generated by the macro below.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_STORE_NULL, value_store_null_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolves the barrier function for a primitive access (defined elsewhere).
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  // Resolves the barrier function for an oop access (defined elsewhere).
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // Computes base + offset as a raw address (defined in the cpp file).
  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);

  // Raw flat-value helpers, forwarded to the cpp file.
  void value_copy_internal(void* src, void* dst, size_t length);
  void value_store_null(void* dst, size_t length);
}
176
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
// (Applied as `decorators & RAW_DECORATOR_MASK` before instantiating RawAccessBarrier.)
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
181
// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  // Resolves (base oop + byte offset) to a raw field address.
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_internal<decorators>(addr, new_value);
  }

  template <typename T>
  static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop_* variants handle the compressed-oop representation as well
  // (see the class comment above).
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  // *_at variants address the field as (base oop + byte offset).
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);
  // Flat-value primitives operating on ValuePayload.
  static void value_copy(const ValuePayload& src, const ValuePayload& dst);
  static void value_store_null(const ValuePayload& dst);
};
367
namespace AccessInternal {
  // Debug-only thread-state verification for accesses (defined in the cpp
  // file); compiles away in product builds via DEBUG_ONLY.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
372
373 // Below is the implementation of the first 4 steps of the template pipeline:
374 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
375 // and sets default decorators to sensible values.
376 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
377 // multiple types. The P type of the address and T type of the value must
378 // match.
379 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
380 // avoided, and in that case avoids it (calling raw accesses or
381 // primitive accesses in a build that does not require primitive GC barriers)
382 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
383 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
384 // to the access.
385
542 assert_access_thread_state();
543 return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
544 dst_obj, dst_offset_in_bytes, dst_raw,
545 length);
546 }
547 };
548
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    // Dispatch pointer; initialized to clone_init (the resolving function),
    // see the initializations below.
    static func_t _clone_func;

    // Resolving function used on the first call (defined out of line).
    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      // Debug-only thread-state check (see AccessInternal::check_access_thread_state).
      assert_access_thread_state();
      _clone_func(src, dst, size);
    }
  };
561
562 template <DecoratorSet decorators, typename T>
563 struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
564 typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
565 static func_t _value_copy_func;
566
567 static void value_copy_init(const ValuePayload& src, const ValuePayload& dst);
568
569 static inline void value_copy(const ValuePayload& src, const ValuePayload& dst) {
570 _value_copy_func(src, dst);
571 }
572 };
573
574 template <DecoratorSet decorators, typename T>
575 struct RuntimeDispatch<decorators, T, BARRIER_VALUE_STORE_NULL>: AllStatic {
576 typedef typename AccessFunction<decorators, T, BARRIER_VALUE_STORE_NULL>::type func_t;
577 static func_t _value_store_null_func;
578
579 static void value_store_null_init(const ValuePayload& dst);
580
581 static inline void value_store_null(const ValuePayload& dst) {
582 _value_store_null_func(dst);
583 }
584 };
585
  // Initialize the function pointers to point to the resolving function.
  // Each *_init presumably resolves the barrier appropriate for the decorators
  // on first use; until then the dispatch entry points at the resolver itself.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
  RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_VALUE_STORE_NULL>::type
  RuntimeDispatch<decorators, T, BARRIER_VALUE_STORE_NULL>::_value_store_null_func = &value_store_null_init;
634
635 // Step 3: Pre-runtime dispatching.
636 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
637 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
638 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
639 // not possible.
640 struct PreRuntimeDispatch: AllStatic {
641 template<DecoratorSet decorators>
642 struct CanHardwireRaw: public std::integral_constant<
643 bool,
644 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
645 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
646 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
647 {};
648
649 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
650
651 template<DecoratorSet decorators>
652 static bool is_hardwired_primitive() {
653 return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
654 }
        // NOTE(review): tail of the non-AS_RAW arraycopy overload; its head
        // lies outside this view. Falls through to the runtime-dispatched
        // arraycopy barrier for the given decorator set.
        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                            dst_obj, dst_offset_in_bytes, dst_raw,
                                                                            length);
      }
    }
936
937 template <DecoratorSet decorators>
938 inline static typename EnableIf<
939 HasDecorator<decorators, AS_RAW>::value>::type
940 clone(oop src, oop dst, size_t size) {
941 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
942 Raw::clone(src, dst, size);
943 }
944
945 template <DecoratorSet decorators>
946 inline static typename EnableIf<
947 !HasDecorator<decorators, AS_RAW>::value>::type
948 clone(oop src, oop dst, size_t size) {
949 RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
950 }
951
952 template <DecoratorSet decorators>
953 inline static typename EnableIf<
954 HasDecorator<decorators, AS_RAW>::value>::type
955 value_copy(const ValuePayload& src, const ValuePayload& dst) {
956 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
957 Raw::value_copy(src, dst);
958 }
959
960 template <DecoratorSet decorators>
961 inline static typename EnableIf<
962 !HasDecorator<decorators, AS_RAW>::value>::type
963 value_copy(const ValuePayload& src, const ValuePayload& dst) {
964 const DecoratorSet expanded_decorators = decorators;
965 RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst);
966 }
967
968 template <DecoratorSet decorators>
969 inline static typename EnableIf<
970 HasDecorator<decorators, AS_RAW>::value>::type
971 value_store_null(const ValuePayload& dst) {
972 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
973 Raw::value_store_null(dst);
974 }
975
976 template <DecoratorSet decorators>
977 inline static typename EnableIf<
978 !HasDecorator<decorators, AS_RAW>::value>::type
979 value_store_null(const ValuePayload& dst) {
980 const DecoratorSet expanded_decorators = decorators;
981 RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_STORE_NULL>::value_store_null(dst);
982 }
983 };
984
985 // Step 2: Reduce types.
986 // Enforce that for non-oop types, T and P have to be strictly the same.
987 // P is the type of the address and T is the type of the values.
  // As for oop types, it is allowed to pass T in {narrowOop, oop} and
989 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
990 // the subsequent table. (columns are P, rows are T)
991 // | | HeapWord | oop | narrowOop |
992 // | oop | rt-comp | hw-none | hw-comp |
993 // | narrowOop | x | x | hw-none |
994 //
995 // x means not allowed
996 // rt-comp means it must be checked at runtime whether the oop is compressed.
997 // hw-none means it is statically known the oop will not be compressed.
998 // hw-comp means it is statically known the oop will be compressed.
999
  // Step 2 entry point for stores: type reduction is a no-op when address
  // and value types already agree, so this forwards straight to
  // PreRuntimeDispatch. (NOTE(review): the closing brace of this function
  // lies outside this view.)
  template <DecoratorSet decorators, typename T>
  inline void store_reduce_types(T* addr, T value) {
    PreRuntimeDispatch::store<decorators>(addr, value);
1255 template <DecoratorSet decorators, typename T>
1256 inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1257 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1258 size_t length) {
1259 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1260 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1261 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1262 using DecayedT = std::decay_t<T>;
1263 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1264 return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1265 dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1266 length);
1267 }
1268
1269 template <DecoratorSet decorators>
1270 inline void clone(oop src, oop dst, size_t size) {
1271 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1272 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1273 }
1274
1275 template <DecoratorSet decorators>
1276 inline void value_copy(const ValuePayload& src, const ValuePayload& dst) {
1277 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1278 PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst);
1279 }
1280
1281 template <DecoratorSet decorators>
1282 static inline void value_store_null(const ValuePayload& dst) {
1283 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1284 PreRuntimeDispatch::value_store_null<expanded_decorators>(dst);
1285 }
1286
1287 // Infer the type that should be returned from an Access::oop_load.
1288 template <typename P, DecoratorSet decorators>
1289 class OopLoadProxy: public StackObj {
1290 private:
1291 P *const _addr;
1292 public:
1293 explicit OopLoadProxy(P* addr) : _addr(addr) {}
1294
    // Implicit conversion chosen when the load target is an (uncompressed) oop.
    inline operator oop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
    }
1298
    // Implicit conversion chosen when the load target is a narrowOop.
    inline operator narrowOop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
    }
1302
    // Loads the oop value as T and compares it against `other`, so the proxy
    // can be used directly in comparisons without an explicit conversion.
    template <typename T>
    inline bool operator ==(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
    }
|