13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_HPP
27
28 #include "cppstdlib/type_traits.hpp"
29 #include "gc/shared/barrierSetConfig.hpp"
30 #include "memory/allocation.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "oops/accessDecorators.hpp"
33 #include "oops/oopsHierarchy.hpp"
34 #include "runtime/globals.hpp"
35 #include "utilities/debug.hpp"
36 #include "utilities/globalDefinitions.hpp"
37
38 // This metafunction returns either oop or narrowOop depending on whether
39 // an access needs to use compressed oops or not.
40 template <DecoratorSet decorators>
41 struct HeapOopType: AllStatic {
42 static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
43 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
44 using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
45 };
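// For illustration (sanity checks only, using the decorator constants from
// accessDecorators.hpp): the heap representation is narrowOop exactly when both
// internal compressed-oop decorators are set, and oop otherwise.
static_assert(std::is_same<HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP |
                                       INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
                           narrowOop>::value, "compressed heap oop");
static_assert(std::is_same<HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP>::type,
                           oop>::value, "uncompressed heap oop");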
46
47 namespace AccessInternal {
48 enum BarrierType {
49 BARRIER_STORE,
50 BARRIER_STORE_AT,
51 BARRIER_LOAD,
52 BARRIER_LOAD_AT,
53 BARRIER_ATOMIC_CMPXCHG,
54 BARRIER_ATOMIC_CMPXCHG_AT,
55 BARRIER_ATOMIC_XCHG,
56 BARRIER_ATOMIC_XCHG_AT,
57 BARRIER_ARRAYCOPY,
58 BARRIER_CLONE
59 };
60
61 template <DecoratorSet decorators, typename T>
62 struct MustConvertCompressedOop: public std::integral_constant<bool,
63 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
64 std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
65 std::is_same<T, oop>::value> {};
66
67 // This metafunction returns an appropriate oop type if the value is oop-like
68 // and otherwise returns the same type T.
69 template <DecoratorSet decorators, typename T>
70 struct EncodedType: AllStatic {
71 using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
72 typename HeapOopType<decorators>::type,
73 T>;
74 };
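  // For illustration (sanity checks only): an oop value accessed with both
  // compressed-oop decorators set is encoded as narrowOop, while a primitive
  // such as jint passes through unchanged.
  static_assert(std::is_same<EncodedType<INTERNAL_VALUE_IS_OOP |
                                         INTERNAL_CONVERT_COMPRESSED_OOP |
                                         INTERNAL_RT_USE_COMPRESSED_OOPS, oop>::type,
                             narrowOop>::value, "oop encoded as narrowOop");
  static_assert(std::is_same<EncodedType<DECORATORS_NONE, jint>::type,
                             jint>::value, "primitives unchanged");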
75
76 template <DecoratorSet decorators>
77 inline typename HeapOopType<decorators>::type*
78 oop_field_addr(oop base, ptrdiff_t byte_offset) {
79 return reinterpret_cast<typename HeapOopType<decorators>::type*>(
80 reinterpret_cast<intptr_t>((void*)base) + byte_offset);
81 }
82
83 template <DecoratorSet decorators, typename T>
84 struct AccessFunctionTypes {
85 typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
86 typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
87 typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
88 typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
89
90 typedef T (*load_func_t)(void* addr);
91 typedef void (*store_func_t)(void* addr, T value);
92 typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
93 typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
94
95 typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
96 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
97 size_t length);
98 typedef void (*clone_func_t)(oop src, oop dst, size_t size);
99 };
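  // For example, instantiating AccessFunctionTypes with T = jint yields
  //   load_at_func_t  == jint (*)(oop, ptrdiff_t)
  //   store_at_func_t == void (*)(oop, ptrdiff_t, jint)
  // and so on; these are the signatures that the RuntimeDispatch function
  // pointers further down are declared with.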
100
101 template <DecoratorSet decorators>
102 struct AccessFunctionTypes<decorators, void> {
103 typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
104 arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
105 size_t length);
106 };
107
108 template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
109
110 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func) \
111 template <DecoratorSet decorators, typename T> \
112 struct AccessFunction<decorators, T, bt>: AllStatic{ \
113 typedef typename AccessFunctionTypes<decorators, T>::func type; \
114 }
115 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
116 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
117 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
118 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
119 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
120 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
121 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
122 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
123 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
124 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
125 #undef ACCESS_GENERATE_ACCESS_FUNCTION
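  // For illustration, the first use above expands to the following
  // specialization (modulo whitespace):
  //
  //   template <DecoratorSet decorators, typename T>
  //   struct AccessFunction<decorators, T, BARRIER_STORE>: AllStatic {
  //     typedef typename AccessFunctionTypes<decorators, T>::store_func_t type;
  //   };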
126
127 template <DecoratorSet decorators, typename T, BarrierType barrier_type>
128 typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
129
130 template <DecoratorSet decorators, typename T, BarrierType barrier_type>
131 typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
132
133 void* field_addr(oop base, ptrdiff_t offset);
134
135   // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
136   // faster build times, given how frequently the access header is included.
137 void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
138 void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
139 void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
140
141 void arraycopy_disjoint_words(void* src, void* dst, size_t length);
142 void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
143
144 template<typename T>
145 void arraycopy_conjoint(T* src, T* dst, size_t length);
146 template<typename T>
147 void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
148 template<typename T>
149 void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
150 }
151
152 // This mask specifies what decorators are relevant for raw accesses. When passing
153 // accesses to the raw layer, irrelevant decorators are removed.
154 const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
155 ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
156
157 // The RawAccessBarrier performs raw accesses with additional knowledge of
158 // memory ordering, so that OrderAccess/Atomic is called when necessary.
159 // It additionally handles compressed oops, and hence, strictly speaking, is
160 // not completely "raw".
161 template <DecoratorSet decorators>
162 class RawAccessBarrier: public AllStatic {
163 protected:
164 static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
165 return AccessInternal::field_addr(base, byte_offset);
166 }
167
168 protected:
169 // Only encode if INTERNAL_VALUE_IS_OOP
269 static inline void store(void* addr, T value) {
270 store_internal<decorators>(addr, value);
271 }
272
273 template <typename T>
274 static inline T load(void* addr) {
275 return load_internal<decorators, T>(addr);
276 }
277
278 template <typename T>
279 static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
280 return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
281 }
282
283 template <typename T>
284 static inline T atomic_xchg(void* addr, T new_value) {
285 return atomic_xchg_internal<decorators>(addr, new_value);
286 }
287
288 template <typename T>
289 static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
290 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
291 size_t length);
292
293 template <typename T>
294 static void oop_store(void* addr, T value);
295 template <typename T>
296 static void oop_store_at(oop base, ptrdiff_t offset, T value);
297
298 template <typename T>
299 static T oop_load(void* addr);
300 template <typename T>
301 static T oop_load_at(oop base, ptrdiff_t offset);
302
303 template <typename T>
304 static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
305 template <typename T>
306 static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
307
308 template <typename T>
309 static T oop_atomic_xchg(void* addr, T new_value);
314 static void store_at(oop base, ptrdiff_t offset, T value) {
315 store(field_addr(base, offset), value);
316 }
317
318 template <typename T>
319 static T load_at(oop base, ptrdiff_t offset) {
320 return load<T>(field_addr(base, offset));
321 }
322
323 template <typename T>
324 static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
325 return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
326 }
327
328 template <typename T>
329 static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
330 return atomic_xchg(field_addr(base, offset), new_value);
331 }
332
333 template <typename T>
334 static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
335 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
336 size_t length);
337
338 static void clone(oop src, oop dst, size_t size);
339 };
340
341 namespace AccessInternal {
342 DEBUG_ONLY(void check_access_thread_state());
343 #define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
344 }
345
346 // Below is the implementation of the first 4 steps of the template pipeline:
347 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
348 // and sets default decorators to sensible values.
349 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
350 // multiple types. The P type of the address and T type of the value must
351 // match.
352 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
353 // avoided, and in that case avoids it (calling raw accesses or
354 // primitive accesses in a build that does not require primitive GC barriers)
355 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
356 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
357 // to the access.
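// As a rough sketch (the HeapAccess/RawAccess entry points themselves live in
// access.hpp), a primitive load such as
//
//   jint v = HeapAccess<MO_RELAXED>::load_at<jint>(obj, offset);
//
// first has its decorators fixed up and its type decayed (step 1), is then
// funneled onto a single T/P type pair (step 2), and is finally either
// hardwired to a RawAccessBarrier call (step 3) or dispatched through the
// RuntimeDispatch function pointers to the GC's BarrierSet::AccessBarrier
// (step 4).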
358
488 };
489
490 template <DecoratorSet decorators, typename T>
491 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
492 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
493 static func_t _atomic_xchg_at_func;
494
495 static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
496
497 static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
498 assert_access_thread_state();
499 return _atomic_xchg_at_func(base, offset, new_value);
500 }
501 };
502
503 template <DecoratorSet decorators, typename T>
504 struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
505 typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
506 static func_t _arraycopy_func;
507
508 static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
509 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
510 size_t length);
511
512 static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
513 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
514 size_t length) {
515 assert_access_thread_state();
516 return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
517 dst_obj, dst_offset_in_bytes, dst_raw,
518 length);
519 }
520 };
521
522 template <DecoratorSet decorators, typename T>
523 struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
524 typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
525 static func_t _clone_func;
526
527 static void clone_init(oop src, oop dst, size_t size);
528
529 static inline void clone(oop src, oop dst, size_t size) {
530 assert_access_thread_state();
531 _clone_func(src, dst, size);
532 }
533 };
534
535 // Initialize the function pointers to point to the resolving function.
536 template <DecoratorSet decorators, typename T>
537 typename AccessFunction<decorators, T, BARRIER_STORE>::type
538 RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
539
540 template <DecoratorSet decorators, typename T>
541 typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
542 RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
543
544 template <DecoratorSet decorators, typename T>
545 typename AccessFunction<decorators, T, BARRIER_LOAD>::type
546 RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
547
548 template <DecoratorSet decorators, typename T>
549 typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
550 RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
551
552 template <DecoratorSet decorators, typename T>
553 typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
554 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
556 template <DecoratorSet decorators, typename T>
557 typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
558 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
559
560 template <DecoratorSet decorators, typename T>
561 typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
562 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
563
564 template <DecoratorSet decorators, typename T>
565 typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
566 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
567
568 template <DecoratorSet decorators, typename T>
569 typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
570 RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
571
572 template <DecoratorSet decorators, typename T>
573 typename AccessFunction<decorators, T, BARRIER_CLONE>::type
574 RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
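  // The *_init functions above are not defined in this header; as a sketch
  // (assuming the BarrierResolver helper from accessBackend.inline.hpp), each
  // one resolves the barrier once, patches the dispatch pointer, and then
  // performs the access it was asked to do, roughly:
  //
  //   template <DecoratorSet decorators, typename T>
  //   void RuntimeDispatch<decorators, T, BARRIER_CLONE>::clone_init(oop src, oop dst, size_t size) {
  //     func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
  //     _clone_func = function;
  //     function(src, dst, size);
  //   }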
575
576 // Step 3: Pre-runtime dispatching.
577 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
578 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
579 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
580 // not possible.
581 struct PreRuntimeDispatch: AllStatic {
582 template<DecoratorSet decorators>
583 struct CanHardwireRaw: public std::integral_constant<
584 bool,
585 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
586 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
587 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
588 {};
589
590 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
591
592 template<DecoratorSet decorators>
593 static bool is_hardwired_primitive() {
594 return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
595 }
803 inline static typename EnableIf<
804 HasDecorator<decorators, AS_RAW>::value, T>::type
805 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
806 return atomic_xchg<decorators>(field_addr(base, offset), new_value);
807 }
808
809 template <DecoratorSet decorators, typename T>
810 inline static typename EnableIf<
811 !HasDecorator<decorators, AS_RAW>::value, T>::type
812 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
813 if (is_hardwired_primitive<decorators>()) {
814 const DecoratorSet expanded_decorators = decorators | AS_RAW;
815         return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_value);
816 } else {
817 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
818 }
819 }
820
821 template <DecoratorSet decorators, typename T>
822 inline static typename EnableIf<
823 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
824 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
825 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
826 size_t length) {
827 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
828 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
829 return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
830 dst_obj, dst_offset_in_bytes, dst_raw,
831 length);
832 } else {
833 return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
834 dst_obj, dst_offset_in_bytes, dst_raw,
835 length);
836 }
837 }
838
839 template <DecoratorSet decorators, typename T>
840 inline static typename EnableIf<
841 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
842 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
843 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
844 size_t length) {
845 if (UseCompressedOops) {
846 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
847 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
848 dst_obj, dst_offset_in_bytes, dst_raw,
849 length);
850 } else {
851 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
852 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
853 dst_obj, dst_offset_in_bytes, dst_raw,
854 length);
855 }
856 }
857
858 template <DecoratorSet decorators, typename T>
859 inline static typename EnableIf<
860 !HasDecorator<decorators, AS_RAW>::value, bool>::type
861 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
862 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
863 size_t length) {
864 if (is_hardwired_primitive<decorators>()) {
865 const DecoratorSet expanded_decorators = decorators | AS_RAW;
866 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
867 dst_obj, dst_offset_in_bytes, dst_raw,
868 length);
869 } else {
870 return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
871 dst_obj, dst_offset_in_bytes, dst_raw,
872 length);
873 }
874 }
875
876 template <DecoratorSet decorators>
877 inline static typename EnableIf<
878 HasDecorator<decorators, AS_RAW>::value>::type
879 clone(oop src, oop dst, size_t size) {
880 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
881 Raw::clone(src, dst, size);
882 }
883
884 template <DecoratorSet decorators>
885 inline static typename EnableIf<
886 !HasDecorator<decorators, AS_RAW>::value>::type
887 clone(oop src, oop dst, size_t size) {
888 RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
889 }
890 };
891
892 // Step 2: Reduce types.
893 // Enforce that for non-oop types, T and P have to be strictly the same.
894 // P is the type of the address and T is the type of the values.
895 // As for oop types, it is allowed to pass T in {narrowOop, oop} and
896 // P in {narrowOop, oop, HeapWord*}. The following table shows which
897 // combinations are allowed (columns are P, rows are T):
898 // | | HeapWord | oop | narrowOop |
899 // | oop | rt-comp | hw-none | hw-comp |
900 // | narrowOop | x | x | hw-none |
901 //
902 // x means not allowed
903 // rt-comp means it must be checked at runtime whether the oop is compressed.
904 // hw-none means it is statically known the oop will not be compressed.
905 // hw-comp means it is statically known the oop will be compressed.
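// For example, loading an oop through a narrowOop* address is the hw-comp case:
// the compressed representation is known statically, so both internal
// compressed-oop decorators are added up front. Loading an oop through a
// HeapWord* address is the rt-comp case: only INTERNAL_CONVERT_COMPRESSED_OOP
// is added, and whether the slot actually holds a narrowOop is decided at
// runtime (see load_reduce_types below).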
906
907 template <DecoratorSet decorators, typename T>
908 inline void store_reduce_types(T* addr, T value) {
909 PreRuntimeDispatch::store<decorators>(addr, value);
984
985 template <DecoratorSet decorators, typename T>
986 inline T load_reduce_types(T* addr) {
987 return PreRuntimeDispatch::load<decorators, T>(addr);
988 }
989
990 template <DecoratorSet decorators, typename T>
991 inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
992 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
993 INTERNAL_RT_USE_COMPRESSED_OOPS;
994 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
995 }
996
997 template <DecoratorSet decorators, typename T>
998 inline oop load_reduce_types(HeapWord* addr) {
999 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1000 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1001 }
1002
1003 template <DecoratorSet decorators, typename T>
1004 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
1005 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1006 size_t length) {
1007 return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
1008 dst_obj, dst_offset_in_bytes, dst_raw,
1009 length);
1010 }
1011
1012 template <DecoratorSet decorators>
1013 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1014 arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1015 size_t length) {
1016 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1017 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1018 dst_obj, dst_offset_in_bytes, dst_raw,
1019 length);
1020 }
1021
1022 template <DecoratorSet decorators>
1023 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1024 arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1025 size_t length) {
1026 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1027 INTERNAL_RT_USE_COMPRESSED_OOPS;
1028 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1029 dst_obj, dst_offset_in_bytes, dst_raw,
1030 length);
1031 }
1032
1033 // Step 1: Set default decorators. This step remembers if a type was volatile
1034 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1035 // memory ordering is set for the access, and the implied decorator rules
1036 // are applied to select sensible defaults for decorators that have not been
1037 // explicitly set. For example, default object referent strength is set to strong.
1038 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
1039 // and references from the types). This step also performs some type verification
1040 // to check that the passed-in types make sense.
1041
1042 template <DecoratorSet decorators, typename T>
1043 static void verify_types(){
1044 // If this fails to compile, then you have sent in something that is
1045 // not recognized as a valid primitive type to a primitive Access function.
1143 DecayedT new_decayed_value = new_value;
1144 // atomic_xchg is only available in SEQ_CST flavour.
1145 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1146 return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1147 new_decayed_value);
1148 }
1149
1150 template <DecoratorSet decorators, typename T>
1151 inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
1152 verify_types<decorators, T>();
1153 using DecayedT = std::decay_t<T>;
1154 DecayedT new_decayed_value = new_value;
1155 // atomic_xchg is only available in SEQ_CST flavour.
1156 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
1157 (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1158 INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
1159 return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
1160 }
1161
1162 template <DecoratorSet decorators, typename T>
1163 inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1164 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1165 size_t length) {
1166 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1167 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1168 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1169 using DecayedT = std::decay_t<T>;
1170 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1171 return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1172 dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1173 length);
1174 }
1175
1176 template <DecoratorSet decorators>
1177 inline void clone(oop src, oop dst, size_t size) {
1178 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1179 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1180 }
1181
1182 // Infer the type that should be returned from an Access::oop_load.
1183 template <typename P, DecoratorSet decorators>
1184 class OopLoadProxy: public StackObj {
1185 private:
1186 P *const _addr;
1187 public:
1188 explicit OopLoadProxy(P* addr) : _addr(addr) {}
1189
1190 inline operator oop() {
1191 return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1192 }
1193
1194 inline operator narrowOop() {
1195 return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1196 }
1197
1198 template <typename T>
1199 inline bool operator ==(const T& other) const {
1200 return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1201 }
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_HPP
27
28 #include "cppstdlib/type_traits.hpp"
29 #include "gc/shared/barrierSetConfig.hpp"
30 #include "memory/allocation.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "oops/accessDecorators.hpp"
33 #include "oops/inlineKlass.hpp"
34 #include "oops/oopsHierarchy.hpp"
35 #include "runtime/globals.hpp"
36 #include "utilities/debug.hpp"
37 #include "utilities/globalDefinitions.hpp"
38
39 // Result from oop_arraycopy
40 enum class OopCopyResult {
41   ok, // oop array copy successful
42 failed_check_class_cast, // oop array copy failed subtype check (ARRAYCOPY_CHECKCAST)
43 failed_check_null // oop array copy failed null check (ARRAYCOPY_NOTNULL)
44 };
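// For example, a copy issued with ARRAYCOPY_CHECKCAST reports
// failed_check_class_cast when an element fails the subtype check, and a copy
// issued with ARRAYCOPY_NOTNULL reports failed_check_null when a null element
// is encountered; callers are expected to translate these into the matching
// Java exceptions. A fully successful copy reports ok.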
45
46 // This metafunction returns either oop or narrowOop depending on whether
47 // an access needs to use compressed oops or not.
48 template <DecoratorSet decorators>
49 struct HeapOopType: AllStatic {
50 static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
51 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
52 using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
53 };
54
55 // This metafunction returns either oop or narrowOop depending on whether
56 // a backend needs to consider compressed oop types or not.
57 template <DecoratorSet decorators>
58 struct ValueOopType: AllStatic {
59 static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
60 using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
61 };
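// For illustration (sanity checks only): unlike HeapOopType, ValueOopType only
// consults the runtime compressed-oops decorator.
static_assert(std::is_same<ValueOopType<INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
                           narrowOop>::value, "compressed value oop");
static_assert(std::is_same<ValueOopType<DECORATORS_NONE>::type,
                           oop>::value, "uncompressed value oop");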
62
63 namespace AccessInternal {
64 enum BarrierType {
65 BARRIER_STORE,
66 BARRIER_STORE_AT,
67 BARRIER_LOAD,
68 BARRIER_LOAD_AT,
69 BARRIER_ATOMIC_CMPXCHG,
70 BARRIER_ATOMIC_CMPXCHG_AT,
71 BARRIER_ATOMIC_XCHG,
72 BARRIER_ATOMIC_XCHG_AT,
73 BARRIER_ARRAYCOPY,
74 BARRIER_CLONE,
75 BARRIER_VALUE_COPY
76 };
77
78 template <DecoratorSet decorators, typename T>
79 struct MustConvertCompressedOop: public std::integral_constant<bool,
80 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
81 std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
82 std::is_same<T, oop>::value> {};
83
84 // This metafunction returns an appropriate oop type if the value is oop-like
85 // and otherwise returns the same type T.
86 template <DecoratorSet decorators, typename T>
87 struct EncodedType: AllStatic {
88 using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
89 typename HeapOopType<decorators>::type,
90 T>;
91 };
92
93 template <DecoratorSet decorators>
94 inline typename HeapOopType<decorators>::type*
95 oop_field_addr(oop base, ptrdiff_t byte_offset) {
96 return reinterpret_cast<typename HeapOopType<decorators>::type*>(
97 reinterpret_cast<intptr_t>((void*)base) + byte_offset);
98 }
99
100 template <DecoratorSet decorators, typename T>
101 struct AccessFunctionTypes {
102 typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
103 typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
104 typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
105 typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
106
107 typedef T (*load_func_t)(void* addr);
108 typedef void (*store_func_t)(void* addr, T value);
109 typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
110 typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
111
112 typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
113 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
114 size_t length);
115 typedef void (*clone_func_t)(oop src, oop dst, size_t size);
116 typedef void (*value_copy_func_t)(void* src, void* dst, InlineKlass* md, LayoutKind lk);
117 };
118
119 template <DecoratorSet decorators>
120 struct AccessFunctionTypes<decorators, void> {
121 typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
122 arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
123 size_t length);
124 };
125
126 template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
127
128 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func) \
129 template <DecoratorSet decorators, typename T> \
130 struct AccessFunction<decorators, T, bt>: AllStatic{ \
131 typedef typename AccessFunctionTypes<decorators, T>::func type; \
132 }
133 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
134 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
135 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
136 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
137 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
138 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
139 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
140 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
141 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
142 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
143 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
144 #undef ACCESS_GENERATE_ACCESS_FUNCTION
145
146 template <DecoratorSet decorators, typename T, BarrierType barrier_type>
147 typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
148
149 template <DecoratorSet decorators, typename T, BarrierType barrier_type>
150 typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
151
152 void* field_addr(oop base, ptrdiff_t offset);
153
154   // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
155   // faster build times, given how frequently the access header is included.
156 void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
157 void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
158 void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
159
160 void arraycopy_disjoint_words(void* src, void* dst, size_t length);
161 void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
162
163 template<typename T>
164 void arraycopy_conjoint(T* src, T* dst, size_t length);
165 template<typename T>
166 void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
167 template<typename T>
168 void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
169
170 void value_copy_internal(void* src, void* dst, size_t length);
171 }
172
173 // This mask specifies what decorators are relevant for raw accesses. When passing
174 // accesses to the raw layer, irrelevant decorators are removed.
175 const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
176 ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
177
178 // The RawAccessBarrier performs raw accesses with additional knowledge of
179 // memory ordering, so that OrderAccess/Atomic is called when necessary.
180 // It additionally handles compressed oops, and hence, strictly speaking, is
181 // not completely "raw".
182 template <DecoratorSet decorators>
183 class RawAccessBarrier: public AllStatic {
184 protected:
185 static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
186 return AccessInternal::field_addr(base, byte_offset);
187 }
188
189 protected:
190 // Only encode if INTERNAL_VALUE_IS_OOP
290 static inline void store(void* addr, T value) {
291 store_internal<decorators>(addr, value);
292 }
293
294 template <typename T>
295 static inline T load(void* addr) {
296 return load_internal<decorators, T>(addr);
297 }
298
299 template <typename T>
300 static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
301 return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
302 }
303
304 template <typename T>
305 static inline T atomic_xchg(void* addr, T new_value) {
306 return atomic_xchg_internal<decorators>(addr, new_value);
307 }
308
309 template <typename T>
310 static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
311 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
312 size_t length);
313
314 template <typename T>
315 static void oop_store(void* addr, T value);
316 template <typename T>
317 static void oop_store_at(oop base, ptrdiff_t offset, T value);
318
319 template <typename T>
320 static T oop_load(void* addr);
321 template <typename T>
322 static T oop_load_at(oop base, ptrdiff_t offset);
323
324 template <typename T>
325 static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
326 template <typename T>
327 static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
328
329 template <typename T>
330 static T oop_atomic_xchg(void* addr, T new_value);
335 static void store_at(oop base, ptrdiff_t offset, T value) {
336 store(field_addr(base, offset), value);
337 }
338
339 template <typename T>
340 static T load_at(oop base, ptrdiff_t offset) {
341 return load<T>(field_addr(base, offset));
342 }
343
344 template <typename T>
345 static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
346 return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
347 }
348
349 template <typename T>
350 static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
351 return atomic_xchg(field_addr(base, offset), new_value);
352 }
353
354 template <typename T>
355 static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
356 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
357 size_t length);
358
359 static void clone(oop src, oop dst, size_t size);
360 static void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk);
361
362 };
363
364 namespace AccessInternal {
365 DEBUG_ONLY(void check_access_thread_state());
366 #define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
367 }
368
369 // Below is the implementation of the first 4 steps of the template pipeline:
370 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
371 // and sets default decorators to sensible values.
372 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
373 // multiple types. The P type of the address and T type of the value must
374 // match.
375 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
376 // avoided, and in that case avoids it (calling raw accesses or
377 // primitive accesses in a build that does not require primitive GC barriers)
378 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
379 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
380 // to the access.
381
511 };
512
513 template <DecoratorSet decorators, typename T>
514 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
515 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
516 static func_t _atomic_xchg_at_func;
517
518 static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
519
520 static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
521 assert_access_thread_state();
522 return _atomic_xchg_at_func(base, offset, new_value);
523 }
524 };
525
526 template <DecoratorSet decorators, typename T>
527 struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
528 typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
529 static func_t _arraycopy_func;
530
531 static OopCopyResult arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
532 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
533 size_t length);
534
535 static inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
536 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
537 size_t length) {
538 assert_access_thread_state();
539 return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
540 dst_obj, dst_offset_in_bytes, dst_raw,
541 length);
542 }
543 };
544
545 template <DecoratorSet decorators, typename T>
546 struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
547 typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
548 static func_t _clone_func;
549
550 static void clone_init(oop src, oop dst, size_t size);
551
552 static inline void clone(oop src, oop dst, size_t size) {
553 assert_access_thread_state();
554 _clone_func(src, dst, size);
555 }
556 };
557
558 template <DecoratorSet decorators, typename T>
559 struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
560 typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
561 static func_t _value_copy_func;
562
563 static void value_copy_init(void* src, void* dst, InlineKlass* md, LayoutKind lk);
564
565 static inline void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
566 _value_copy_func(src, dst, md, lk);
567 }
568 };
569
570 // Initialize the function pointers to point to the resolving function.
571 template <DecoratorSet decorators, typename T>
572 typename AccessFunction<decorators, T, BARRIER_STORE>::type
573 RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
574
575 template <DecoratorSet decorators, typename T>
576 typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
577 RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
578
579 template <DecoratorSet decorators, typename T>
580 typename AccessFunction<decorators, T, BARRIER_LOAD>::type
581 RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
582
583 template <DecoratorSet decorators, typename T>
584 typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
585 RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
586
587 template <DecoratorSet decorators, typename T>
588 typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
589 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
591 template <DecoratorSet decorators, typename T>
592 typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
593 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
594
595 template <DecoratorSet decorators, typename T>
596 typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
597 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
598
599 template <DecoratorSet decorators, typename T>
600 typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
601 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
602
603 template <DecoratorSet decorators, typename T>
604 typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
605 RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
606
607 template <DecoratorSet decorators, typename T>
608 typename AccessFunction<decorators, T, BARRIER_CLONE>::type
609 RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
610
611 template <DecoratorSet decorators, typename T>
612 typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
613 RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;
614
615 // Step 3: Pre-runtime dispatching.
616 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
617 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
618 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
619 // not possible.
620 struct PreRuntimeDispatch: AllStatic {
621 template<DecoratorSet decorators>
622 struct CanHardwireRaw: public std::integral_constant<
623 bool,
624 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
625 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
626 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
627 {};
628
629 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
630
631 template<DecoratorSet decorators>
632 static bool is_hardwired_primitive() {
633 return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
634 }
842 inline static typename EnableIf<
843 HasDecorator<decorators, AS_RAW>::value, T>::type
844 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
845 return atomic_xchg<decorators>(field_addr(base, offset), new_value);
846 }
847
848 template <DecoratorSet decorators, typename T>
849 inline static typename EnableIf<
850 !HasDecorator<decorators, AS_RAW>::value, T>::type
851 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
852 if (is_hardwired_primitive<decorators>()) {
853 const DecoratorSet expanded_decorators = decorators | AS_RAW;
854         return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_value);
855 } else {
856 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
857 }
858 }
859
860 template <DecoratorSet decorators, typename T>
861 inline static typename EnableIf<
862 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, OopCopyResult>::type
863 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
864 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
865 size_t length) {
866 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
867 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
868 Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
869 dst_obj, dst_offset_in_bytes, dst_raw,
870 length);
871 } else {
872 Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
873 dst_obj, dst_offset_in_bytes, dst_raw,
874 length);
875 }
876
877 return OopCopyResult::ok;
878 }
879
880 template <DecoratorSet decorators, typename T>
881 inline static typename EnableIf<
882 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, OopCopyResult>::type
883 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
884 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
885 size_t length) {
886 if (UseCompressedOops) {
887 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
888 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
889 dst_obj, dst_offset_in_bytes, dst_raw,
890 length);
891 } else {
892 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
893 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
894 dst_obj, dst_offset_in_bytes, dst_raw,
895 length);
896 }
897 }
898
899 template <DecoratorSet decorators, typename T>
900 inline static typename EnableIf<
901 !HasDecorator<decorators, AS_RAW>::value, OopCopyResult>::type
902 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
903 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
904 size_t length) {
905 if (is_hardwired_primitive<decorators>()) {
906 const DecoratorSet expanded_decorators = decorators | AS_RAW;
907 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
908 dst_obj, dst_offset_in_bytes, dst_raw,
909 length);
910 } else {
911 return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
912 dst_obj, dst_offset_in_bytes, dst_raw,
913 length);
914 }
915 }
916
917 template <DecoratorSet decorators>
918 inline static typename EnableIf<
919 HasDecorator<decorators, AS_RAW>::value>::type
920 clone(oop src, oop dst, size_t size) {
921 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
922 Raw::clone(src, dst, size);
923 }
924
925 template <DecoratorSet decorators>
926 inline static typename EnableIf<
927 !HasDecorator<decorators, AS_RAW>::value>::type
928 clone(oop src, oop dst, size_t size) {
929 RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
930 }
931
932 template <DecoratorSet decorators>
933 inline static typename EnableIf<
934 HasDecorator<decorators, AS_RAW>::value>::type
935 value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
936 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
937 Raw::value_copy(src, dst, md, lk);
938 }
939
940 template <DecoratorSet decorators>
941 inline static typename EnableIf<
942 !HasDecorator<decorators, AS_RAW>::value>::type
943 value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
944 const DecoratorSet expanded_decorators = decorators;
945 RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst, md, lk);
946 }
947 };
948
949 // Step 2: Reduce types.
950 // Enforce that for non-oop types, T and P have to be strictly the same.
951 // P is the type of the address and T is the type of the values.
952 // As for oop types, it is allowed to pass T in {narrowOop, oop} and
953 // P in {narrowOop, oop, HeapWord*}. The following table shows which
954 // combinations are allowed (columns are P, rows are T):
955 // | | HeapWord | oop | narrowOop |
956 // | oop | rt-comp | hw-none | hw-comp |
957 // | narrowOop | x | x | hw-none |
958 //
959 // x means not allowed
960 // rt-comp means it must be checked at runtime whether the oop is compressed.
961 // hw-none means it is statically known the oop will not be compressed.
962 // hw-comp means it is statically known the oop will be compressed.
963
964 template <DecoratorSet decorators, typename T>
965 inline void store_reduce_types(T* addr, T value) {
966 PreRuntimeDispatch::store<decorators>(addr, value);
1041
1042 template <DecoratorSet decorators, typename T>
1043 inline T load_reduce_types(T* addr) {
1044 return PreRuntimeDispatch::load<decorators, T>(addr);
1045 }
1046
1047 template <DecoratorSet decorators, typename T>
1048 inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
1049 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1050 INTERNAL_RT_USE_COMPRESSED_OOPS;
1051 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
1052 }
1053
1054 template <DecoratorSet decorators, typename T>
1055 inline oop load_reduce_types(HeapWord* addr) {
1056 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1057 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1058 }
1059
1060 template <DecoratorSet decorators, typename T>
1061 inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
1062 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1063 size_t length) {
1064 return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
1065 dst_obj, dst_offset_in_bytes, dst_raw,
1066 length);
1067 }
1068
1069 template <DecoratorSet decorators>
1070 inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1071 arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1072 size_t length) {
1073 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1074 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1075 dst_obj, dst_offset_in_bytes, dst_raw,
1076 length);
1077 }
1078
1079 template <DecoratorSet decorators>
1080 inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1081 arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1082 size_t length) {
1083 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1084 INTERNAL_RT_USE_COMPRESSED_OOPS;
1085 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1086 dst_obj, dst_offset_in_bytes, dst_raw,
1087 length);
1088 }
1089
1090 // Step 1: Set default decorators. This step remembers if a type was volatile
1091 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1092 // memory ordering is set for the access, and the implied decorator rules
1093 // are applied to select sensible defaults for decorators that have not been
1094 // explicitly set. For example, default object referent strength is set to strong.
1095 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
1096 // and references from the types). This step also performs some type verification
1097 // to check that the passed-in types make sense.
1098
1099 template <DecoratorSet decorators, typename T>
1100 static void verify_types(){
1101 // If this fails to compile, then you have sent in something that is
1102 // not recognized as a valid primitive type to a primitive Access function.
1200 DecayedT new_decayed_value = new_value;
1201 // atomic_xchg is only available in SEQ_CST flavour.
1202 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1203 return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1204 new_decayed_value);
1205 }
1206
1207 template <DecoratorSet decorators, typename T>
1208 inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
1209 verify_types<decorators, T>();
1210 using DecayedT = std::decay_t<T>;
1211 DecayedT new_decayed_value = new_value;
1212 // atomic_xchg is only available in SEQ_CST flavour.
1213 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
1214 (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1215 INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
1216 return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
1217 }
1218
1219 template <DecoratorSet decorators, typename T>
1220 inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1221 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1222 size_t length) {
1223 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1224 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1225 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1226 using DecayedT = std::decay_t<T>;
1227 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1228 return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1229 dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1230 length);
1231 }
1232
1233 template <DecoratorSet decorators>
1234 inline void clone(oop src, oop dst, size_t size) {
1235 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1236 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1237 }
1238
1239 template <DecoratorSet decorators>
1240 inline void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
1241 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1242 PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst, md, lk);
1243 }
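  // A minimal usage sketch (decorator choice and variable names are
  // illustrative, not taken from this header): copying the payload of a flat
  // value field boils down to
  //
  //   value_copy<IN_HEAP>(src_payload, dst_payload, inline_klass, layout_kind);
  //
  // where inline_klass is the InlineKlass describing the layout and
  // layout_kind selects which of its layouts (LayoutKind) both addresses use.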
1244
1245 // Infer the type that should be returned from an Access::oop_load.
1246 template <typename P, DecoratorSet decorators>
1247 class OopLoadProxy: public StackObj {
1248 private:
1249 P *const _addr;
1250 public:
1251 explicit OopLoadProxy(P* addr) : _addr(addr) {}
1252
1253 inline operator oop() {
1254 return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1255 }
1256
1257 inline operator narrowOop() {
1258 return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1259 }
1260
1261 template <typename T>
1262 inline bool operator ==(const T& other) const {
1263 return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1264 }
|