28 #include "gc/shared/barrierSetConfig.hpp"
29 #include "memory/allocation.hpp"
30 #include "metaprogramming/enableIf.hpp"
31 #include "oops/accessDecorators.hpp"
32 #include "oops/oopsHierarchy.hpp"
33 #include "runtime/globals.hpp"
34 #include "utilities/debug.hpp"
35 #include "utilities/globalDefinitions.hpp"
36
37 #include <type_traits>
38
39 // This metafunction returns either oop or narrowOop depending on whether
40 // an access needs to use compressed oops or not.
41 template <DecoratorSet decorators>
42 struct HeapOopType: AllStatic {
43 static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
44 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
45 using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
46 };
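// (Illustrative sketch, not part of the upstream header.) The type collapses to
// narrowOop exactly when both compression decorators are set, and stays oop otherwise:
//
//   HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP |
//               INTERNAL_RT_USE_COMPRESSED_OOPS>::type   // narrowOop
//   HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP>::type   // oop (runtime flag not set)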
47
48 namespace AccessInternal {
49 enum BarrierType {
50 BARRIER_STORE,
51 BARRIER_STORE_AT,
52 BARRIER_LOAD,
53 BARRIER_LOAD_AT,
54 BARRIER_ATOMIC_CMPXCHG,
55 BARRIER_ATOMIC_CMPXCHG_AT,
56 BARRIER_ATOMIC_XCHG,
57 BARRIER_ATOMIC_XCHG_AT,
58 BARRIER_ARRAYCOPY,
59 BARRIER_CLONE
60 };
61
62 template <DecoratorSet decorators, typename T>
63 struct MustConvertCompressedOop: public std::integral_constant<bool,
64 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
65 std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
66 std::is_same<T, oop>::value> {};
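// (Sketch for illustration.) Conversion is required only when the caller passes oop
// values while the heap slot actually holds a narrowOop, i.e. encode on store and
// decode on load:
//
//   MustConvertCompressedOop<INTERNAL_VALUE_IS_OOP |
//                            INTERNAL_CONVERT_COMPRESSED_OOP |
//                            INTERNAL_RT_USE_COMPRESSED_OOPS, oop>::value        // true
//   MustConvertCompressedOop<INTERNAL_VALUE_IS_OOP |
//                            INTERNAL_CONVERT_COMPRESSED_OOP |
//                            INTERNAL_RT_USE_COMPRESSED_OOPS, narrowOop>::value  // false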
67
68 // This metafunction returns an appropriate oop type if the value is oop-like
69 // and otherwise returns the same type T.
70 template <DecoratorSet decorators, typename T>
71 struct EncodedType: AllStatic {
72 using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
73 typename HeapOopType<decorators>::type,
74 T>;
75 };
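// (Illustrative.) For oop-like values EncodedType follows HeapOopType; anything else
// passes through unchanged:
//
//   EncodedType<INTERNAL_VALUE_IS_OOP | INTERNAL_CONVERT_COMPRESSED_OOP |
//               INTERNAL_RT_USE_COMPRESSED_OOPS, oop>::type   // narrowOop
//   EncodedType<DECORATORS_NONE, jint>::type                  // jint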
76
77 template <DecoratorSet decorators>
78 inline typename HeapOopType<decorators>::type*
79 oop_field_addr(oop base, ptrdiff_t byte_offset) {
80 return reinterpret_cast<typename HeapOopType<decorators>::type*>(
81 reinterpret_cast<intptr_t>((void*)base) + byte_offset);
82 }
83
84 template <DecoratorSet decorators, typename T>
85 struct AccessFunctionTypes {
86 typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
87 typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
88 typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
89 typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
90
91 typedef T (*load_func_t)(void* addr);
92 typedef void (*store_func_t)(void* addr, T value);
93 typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
94 typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
95
96 typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
97 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
98 size_t length);
99 typedef void (*clone_func_t)(oop src, oop dst, size_t size);
100 };
101
102 template <DecoratorSet decorators>
103 struct AccessFunctionTypes<decorators, void> {
104 typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
105 arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
106 size_t length);
107 };
108
109 template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
110
111 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func) \
112 template <DecoratorSet decorators, typename T> \
113 struct AccessFunction<decorators, T, bt>: AllStatic{ \
114 typedef typename AccessFunctionTypes<decorators, T>::func type; \
115 }
116 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
117 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
118 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
119 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
120 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
121 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
122 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
123 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
124 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
125 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
126 #undef ACCESS_GENERATE_ACCESS_FUNCTION
127
128 template <DecoratorSet decorators, typename T, BarrierType barrier_type>
129 typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
130
131 template <DecoratorSet decorators, typename T, BarrierType barrier_type>
132 typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
133
134 void* field_addr(oop base, ptrdiff_t offset);
135
136 // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
137 // faster build times, given how frequently this access header is included.
138 void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
139 void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
140 void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
141
142 void arraycopy_disjoint_words(void* src, void* dst, size_t length);
143 void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
144
145 template<typename T>
270 static inline void store(void* addr, T value) {
271 store_internal<decorators>(addr, value);
272 }
273
274 template <typename T>
275 static inline T load(void* addr) {
276 return load_internal<decorators, T>(addr);
277 }
278
279 template <typename T>
280 static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
281 return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
282 }
283
284 template <typename T>
285 static inline T atomic_xchg(void* addr, T new_value) {
286 return atomic_xchg_internal<decorators>(addr, new_value);
287 }
288
289 template <typename T>
290 static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
291 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
292 size_t length);
293
294 template <typename T>
295 static void oop_store(void* addr, T value);
296 template <typename T>
297 static void oop_store_at(oop base, ptrdiff_t offset, T value);
298
299 template <typename T>
300 static T oop_load(void* addr);
301 template <typename T>
302 static T oop_load_at(oop base, ptrdiff_t offset);
303
304 template <typename T>
305 static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
306 template <typename T>
307 static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
308
309 template <typename T>
310 static T oop_atomic_xchg(void* addr, T new_value);
315 static void store_at(oop base, ptrdiff_t offset, T value) {
316 store(field_addr(base, offset), value);
317 }
318
319 template <typename T>
320 static T load_at(oop base, ptrdiff_t offset) {
321 return load<T>(field_addr(base, offset));
322 }
323
324 template <typename T>
325 static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
326 return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
327 }
328
329 template <typename T>
330 static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
331 return atomic_xchg(field_addr(base, offset), new_value);
332 }
333
334 template <typename T>
335 static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
336 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
337 size_t length);
338
339 static void clone(oop src, oop dst, size_t size);
340 };
341
342 namespace AccessInternal {
343 DEBUG_ONLY(void check_access_thread_state());
344 #define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
345 }
346
347 // Below is the implementation of the first 4 steps of the template pipeline:
348 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
349 // and sets default decorators to sensible values.
350 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
351 // multiple types. The P type of the address and T type of the value must
352 // match.
353 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
354 // avoided, and in that case avoids it (calling raw accesses or
355 // primitive accesses in a build that does not require primitive GC barriers)
356 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
357 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
358 // to the access.
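// As a hedged sketch (the public entry points such as HeapAccess<> live in access.hpp,
// outside this excerpt), a plain heap load roughly flows through the steps like this:
//
//   load<decorators, P, T>(addr)                            // Step 1: fixup + decay
//     -> load_reduce_types<expanded_decorators>(addr)       // Step 2: unify P and T
//       -> PreRuntimeDispatch::load<decorators, T>(addr)    // Step 3: hardwire if AS_RAW
//         -> RuntimeDispatch<decorators, T, BARRIER_LOAD>   // Step 4: resolved barrier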
359
489 };
490
491 template <DecoratorSet decorators, typename T>
492 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
493 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
494 static func_t _atomic_xchg_at_func;
495
496 static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
497
498 static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
499 assert_access_thread_state();
500 return _atomic_xchg_at_func(base, offset, new_value);
501 }
502 };
503
504 template <DecoratorSet decorators, typename T>
505 struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
506 typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
507 static func_t _arraycopy_func;
508
509 static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
510 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
511 size_t length);
512
513 static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
514 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
515 size_t length) {
516 assert_access_thread_state();
517 return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
518 dst_obj, dst_offset_in_bytes, dst_raw,
519 length);
520 }
521 };
522
523 template <DecoratorSet decorators, typename T>
524 struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
525 typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
526 static func_t _clone_func;
527
528 static void clone_init(oop src, oop dst, size_t size);
529
530 static inline void clone(oop src, oop dst, size_t size) {
531 assert_access_thread_state();
532 _clone_func(src, dst, size);
533 }
534 };
535
536 // Initialize the function pointers to point to the resolving function.
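// (Hedged note; the *_init definitions live outside this excerpt.) Each pointer starts
// out at its *_init resolver: the first call is expected to resolve the barrier that
// matches the decorators, cache it in the pointer, and forward the call, so later
// accesses go straight through the resolved function.
//
//   // first call:   _store_func == &store_init  -> resolve, patch _store_func, store
//   // later calls:  _store_func == resolved BarrierSet::AccessBarrier store function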
537 template <DecoratorSet decorators, typename T>
538 typename AccessFunction<decorators, T, BARRIER_STORE>::type
539 RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
540
541 template <DecoratorSet decorators, typename T>
542 typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
543 RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
544
545 template <DecoratorSet decorators, typename T>
546 typename AccessFunction<decorators, T, BARRIER_LOAD>::type
547 RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
548
549 template <DecoratorSet decorators, typename T>
550 typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
551 RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
552
553 template <DecoratorSet decorators, typename T>
554 typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
555 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
556
557 template <DecoratorSet decorators, typename T>
558 typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
559 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
560
561 template <DecoratorSet decorators, typename T>
562 typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
563 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
564
565 template <DecoratorSet decorators, typename T>
566 typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
567 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
568
569 template <DecoratorSet decorators, typename T>
570 typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
571 RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
572
573 template <DecoratorSet decorators, typename T>
574 typename AccessFunction<decorators, T, BARRIER_CLONE>::type
575 RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
576
577 // Step 3: Pre-runtime dispatching.
578 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
579 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
580 // dispatch point. Otherwise, when hardwiring is not possible, the access falls
581 // through to the Step 4 runtime dispatch.
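// (Decision sketch, illustrative only; it mirrors the overload pattern visible below.)
//
//   // AS_RAW and CanHardwireRaw:    bound directly to RawAccessBarrier, no dispatch.
//   // AS_RAW, not hardwirable:      UseCompressedOops checked, then re-dispatched.
//   // not AS_RAW, primitive value:  AS_RAW added and the access is hardwired anyway.
//   // not AS_RAW, oop value:        handed to RuntimeDispatch<..., BARRIER_*> (Step 4).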
582 struct PreRuntimeDispatch: AllStatic {
583 template<DecoratorSet decorators>
584 struct CanHardwireRaw: public std::integral_constant<
585 bool,
586 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
587 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
588 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
589 {};
590
591 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
592
593 template<DecoratorSet decorators>
594 static bool is_hardwired_primitive() {
595 return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
596 }
804 inline static typename EnableIf<
805 HasDecorator<decorators, AS_RAW>::value, T>::type
806 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
807 return atomic_xchg<decorators>(field_addr(base, offset), new_value);
808 }
809
810 template <DecoratorSet decorators, typename T>
811 inline static typename EnableIf<
812 !HasDecorator<decorators, AS_RAW>::value, T>::type
813 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
814 if (is_hardwired_primitive<decorators>()) {
815 const DecoratorSet expanded_decorators = decorators | AS_RAW;
816 return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_value);
817 } else {
818 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
819 }
820 }
821
822 template <DecoratorSet decorators, typename T>
823 inline static typename EnableIf<
824 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
825 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
826 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
827 size_t length) {
828 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
829 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
830 return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
831 dst_obj, dst_offset_in_bytes, dst_raw,
832 length);
833 } else {
834 return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
835 dst_obj, dst_offset_in_bytes, dst_raw,
836 length);
837 }
838 }
839
840 template <DecoratorSet decorators, typename T>
841 inline static typename EnableIf<
842 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
843 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
844 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
845 size_t length) {
846 if (UseCompressedOops) {
847 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
848 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
849 dst_obj, dst_offset_in_bytes, dst_raw,
850 length);
851 } else {
852 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
853 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
854 dst_obj, dst_offset_in_bytes, dst_raw,
855 length);
856 }
857 }
858
859 template <DecoratorSet decorators, typename T>
860 inline static typename EnableIf<
861 !HasDecorator<decorators, AS_RAW>::value, bool>::type
862 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
863 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
864 size_t length) {
865 if (is_hardwired_primitive<decorators>()) {
866 const DecoratorSet expanded_decorators = decorators | AS_RAW;
867 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
868 dst_obj, dst_offset_in_bytes, dst_raw,
869 length);
870 } else {
871 return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
872 dst_obj, dst_offset_in_bytes, dst_raw,
873 length);
874 }
875 }
876
877 template <DecoratorSet decorators>
878 inline static typename EnableIf<
879 HasDecorator<decorators, AS_RAW>::value>::type
880 clone(oop src, oop dst, size_t size) {
881 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
882 Raw::clone(src, dst, size);
883 }
884
885 template <DecoratorSet decorators>
886 inline static typename EnableIf<
887 !HasDecorator<decorators, AS_RAW>::value>::type
888 clone(oop src, oop dst, size_t size) {
889 RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
890 }
891 };
892
893 // Step 2: Reduce types.
894 // Enforce that for non-oop types, T and P have to be strictly the same.
895 // P is the type of the address and T is the type of the values.
896 // As for oop types, it is allowed to send T in {narrowOop, oop} and
897 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
898 // the subsequent table. (columns are P, rows are T)
899 // | | HeapWord | oop | narrowOop |
900 // | oop | rt-comp | hw-none | hw-comp |
901 // | narrowOop | x | x | hw-none |
902 //
903 // x means not allowed
904 // rt-comp means it must be checked at runtime whether the oop is compressed.
905 // hw-none means it is statically known the oop will not be compressed.
906 // hw-comp means it is statically known the oop will be compressed.
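// (Sketch.) In terms of the load overloads below, the table maps onto:
//
//   load_reduce_types<D>(oop* addr)        // hw-none:  loaded as-is
//   load_reduce_types<D>(narrowOop* addr)  // hw-comp:  both compression decorators added
//   load_reduce_types<D>(HeapWord* addr)   // rt-comp:  INTERNAL_CONVERT_COMPRESSED_OOP added,
//                                          //           UseCompressedOops decides at runtime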
907
908 template <DecoratorSet decorators, typename T>
909 inline void store_reduce_types(T* addr, T value) {
910 PreRuntimeDispatch::store<decorators>(addr, value);
985
986 template <DecoratorSet decorators, typename T>
987 inline T load_reduce_types(T* addr) {
988 return PreRuntimeDispatch::load<decorators, T>(addr);
989 }
990
991 template <DecoratorSet decorators, typename T>
992 inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
993 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
994 INTERNAL_RT_USE_COMPRESSED_OOPS;
995 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
996 }
997
998 template <DecoratorSet decorators, typename T>
999 inline oop load_reduce_types(HeapWord* addr) {
1000 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1001 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1002 }
1003
1004 template <DecoratorSet decorators, typename T>
1005 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
1006 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1007 size_t length) {
1008 return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
1009 dst_obj, dst_offset_in_bytes, dst_raw,
1010 length);
1011 }
1012
1013 template <DecoratorSet decorators>
1014 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1015 arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1016 size_t length) {
1017 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1018 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1019 dst_obj, dst_offset_in_bytes, dst_raw,
1020 length);
1021 }
1022
1023 template <DecoratorSet decorators>
1024 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1025 arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1026 size_t length) {
1027 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1028 INTERNAL_RT_USE_COMPRESSED_OOPS;
1029 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1030 dst_obj, dst_offset_in_bytes, dst_raw,
1031 length);
1032 }
1033
1034 // Step 1: Set default decorators. This step remembers if a type was volatile
1035 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1036 // memory ordering is set for the access, and the implied decorator rules
1037 // are applied to select sensible defaults for decorators that have not been
1038 // explicitly set. For example, default object referent strength is set to strong.
1039 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
1040 // and references from the types). This step also performs some type verification
1041 // that the passed in types make sense.
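// (Minimal sketch; verify_types, std::decay_t and DecoratorFixup are the pieces used
// below, the local names here are illustrative.)
//
//   verify_types<decorators, T>();              // compile-time sanity check
//   using DecayedT = std::decay_t<T>;           // strip cv-qualifiers and references
//   const DecoratorSet fixed_decorators =
//     DecoratorFixup<decorators /* | implied defaults such as memory ordering */>::value;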
1042
1043 template <DecoratorSet decorators, typename T>
1044 static void verify_types(){
1045 // If this fails to compile, then you have sent in something that is
1046 // not recognized as a valid primitive type to a primitive Access function.
1047 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
1048 (std::is_pointer<T>::value || std::is_integral<T>::value) ||
1049 std::is_floating_point<T>::value)); // not allowed primitive type
1050 }
1051
1144 DecayedT new_decayed_value = new_value;
1145 // atomic_xchg is only available in SEQ_CST flavour.
1146 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1147 return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1148 new_decayed_value);
1149 }
1150
1151 template <DecoratorSet decorators, typename T>
1152 inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
1153 verify_types<decorators, T>();
1154 using DecayedT = std::decay_t<T>;
1155 DecayedT new_decayed_value = new_value;
1156 // atomic_xchg is only available in SEQ_CST flavour.
1157 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
1158 (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1159 INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
1160 return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
1161 }
1162
1163 template <DecoratorSet decorators, typename T>
1164 inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1165 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1166 size_t length) {
1167 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1168 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1169 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1170 using DecayedT = std::decay_t<T>;
1171 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1172 return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1173 dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1174 length);
1175 }
1176
1177 template <DecoratorSet decorators>
1178 inline void clone(oop src, oop dst, size_t size) {
1179 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1180 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1181 }
1182
1183 // Infer the type that should be returned from an Access::oop_load.
1184 template <typename P, DecoratorSet decorators>
1185 class OopLoadProxy: public StackObj {
1186 private:
1187 P *const _addr;
1188 public:
1189 explicit OopLoadProxy(P* addr) : _addr(addr) {}
1190
1191 inline operator oop() {
1192 return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1193 }
1194
1195 inline operator narrowOop() {
1196 return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1197 }
1198
1199 template <typename T>
1200 inline bool operator ==(const T& other) const {
1201 return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1202 }
28 #include "gc/shared/barrierSetConfig.hpp"
29 #include "memory/allocation.hpp"
30 #include "metaprogramming/enableIf.hpp"
31 #include "oops/accessDecorators.hpp"
32 #include "oops/oopsHierarchy.hpp"
33 #include "runtime/globals.hpp"
34 #include "utilities/debug.hpp"
35 #include "utilities/globalDefinitions.hpp"
36
37 #include <type_traits>
38
39 // This metafunction returns either oop or narrowOop depending on whether
40 // an access needs to use compressed oops or not.
41 template <DecoratorSet decorators>
42 struct HeapOopType: AllStatic {
43 static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
44 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
45 using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
46 };
47
49 // This metafunction returns either oop or narrowOop depending on whether
49 // a back-end needs to consider compressed oops types or not.
50 template <DecoratorSet decorators>
51 struct ValueOopType: AllStatic {
52 static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
53 using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
54 };
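// (Illustrative.) Unlike HeapOopType above, ValueOopType keys off the runtime flag alone:
//
//   ValueOopType<INTERNAL_RT_USE_COMPRESSED_OOPS>::type   // narrowOop
//   ValueOopType<DECORATORS_NONE>::type                   // oop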
55
56 namespace AccessInternal {
57 enum BarrierType {
58 BARRIER_STORE,
59 BARRIER_STORE_AT,
60 BARRIER_LOAD,
61 BARRIER_LOAD_AT,
62 BARRIER_ATOMIC_CMPXCHG,
63 BARRIER_ATOMIC_CMPXCHG_AT,
64 BARRIER_ATOMIC_XCHG,
65 BARRIER_ATOMIC_XCHG_AT,
66 BARRIER_ARRAYCOPY,
67 BARRIER_CLONE,
68 BARRIER_VALUE_COPY
69 };
70
71 template <DecoratorSet decorators, typename T>
72 struct MustConvertCompressedOop: public std::integral_constant<bool,
73 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
74 std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
75 std::is_same<T, oop>::value> {};
76
77 // This metafunction returns an appropriate oop type if the value is oop-like
78 // and otherwise returns the same type T.
79 template <DecoratorSet decorators, typename T>
80 struct EncodedType: AllStatic {
81 using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
82 typename HeapOopType<decorators>::type,
83 T>;
84 };
85
86 template <DecoratorSet decorators>
87 inline typename HeapOopType<decorators>::type*
88 oop_field_addr(oop base, ptrdiff_t byte_offset) {
89 return reinterpret_cast<typename HeapOopType<decorators>::type*>(
90 reinterpret_cast<intptr_t>((void*)base) + byte_offset);
91 }
92
93 template <DecoratorSet decorators, typename T>
94 struct AccessFunctionTypes {
95 typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
96 typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
97 typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
98 typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
99
100 typedef T (*load_func_t)(void* addr);
101 typedef void (*store_func_t)(void* addr, T value);
102 typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
103 typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
104
105 typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
106 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
107 size_t length);
108 typedef void (*clone_func_t)(oop src, oop dst, size_t size);
109 typedef void (*value_copy_func_t)(void* src, void* dst, InlineKlass* md);
110 };
111
112 template <DecoratorSet decorators>
113 struct AccessFunctionTypes<decorators, void> {
114 typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
115 arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
116 size_t length);
117 };
118
119 template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
120
121 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func) \
122 template <DecoratorSet decorators, typename T> \
123 struct AccessFunction<decorators, T, bt>: AllStatic{ \
124 typedef typename AccessFunctionTypes<decorators, T>::func type; \
125 }
126 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
127 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
128 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
129 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
130 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
131 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
132 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
133 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
134 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
135 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
136 ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
137 #undef ACCESS_GENERATE_ACCESS_FUNCTION
138
139 template <DecoratorSet decorators, typename T, BarrierType barrier_type>
140 typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
141
142 template <DecoratorSet decorators, typename T, BarrierType barrier_type>
143 typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
144
145 void* field_addr(oop base, ptrdiff_t offset);
146
147 // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
148 // faster build times, given how frequently this access header is included.
149 void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
150 void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
151 void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
152
153 void arraycopy_disjoint_words(void* src, void* dst, size_t length);
154 void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
155
156 template<typename T>
281 static inline void store(void* addr, T value) {
282 store_internal<decorators>(addr, value);
283 }
284
285 template <typename T>
286 static inline T load(void* addr) {
287 return load_internal<decorators, T>(addr);
288 }
289
290 template <typename T>
291 static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
292 return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
293 }
294
295 template <typename T>
296 static inline T atomic_xchg(void* addr, T new_value) {
297 return atomic_xchg_internal<decorators>(addr, new_value);
298 }
299
300 template <typename T>
301 static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
302 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
303 size_t length);
304
305 template <typename T>
306 static void oop_store(void* addr, T value);
307 template <typename T>
308 static void oop_store_at(oop base, ptrdiff_t offset, T value);
309
310 template <typename T>
311 static T oop_load(void* addr);
312 template <typename T>
313 static T oop_load_at(oop base, ptrdiff_t offset);
314
315 template <typename T>
316 static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
317 template <typename T>
318 static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
319
320 template <typename T>
321 static T oop_atomic_xchg(void* addr, T new_value);
326 static void store_at(oop base, ptrdiff_t offset, T value) {
327 store(field_addr(base, offset), value);
328 }
329
330 template <typename T>
331 static T load_at(oop base, ptrdiff_t offset) {
332 return load<T>(field_addr(base, offset));
333 }
334
335 template <typename T>
336 static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
337 return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
338 }
339
340 template <typename T>
341 static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
342 return atomic_xchg(field_addr(base, offset), new_value);
343 }
344
345 template <typename T>
346 static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
347 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
348 size_t length);
349
350 static void clone(oop src, oop dst, size_t size);
351 static void value_copy(void* src, void* dst, InlineKlass* md);
352
353 };
354
355 namespace AccessInternal {
356 DEBUG_ONLY(void check_access_thread_state());
357 #define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
358 }
359
360 // Below is the implementation of the first 4 steps of the template pipeline:
361 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
362 // and sets default decorators to sensible values.
363 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
364 // multiple types. The P type of the address and T type of the value must
365 // match.
366 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
367 // avoided, and in that case avoids it (calling raw accesses or
368 // primitive accesses in a build that does not require primitive GC barriers)
369 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
370 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
371 // to the access.
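// (Hedged sketch for the value_copy path present in this variant of the header; there is
// no address/value type pair to reduce, so Step 2 is effectively a no-op for it.)
//
//   value_copy<decorators>(src, dst, md)                      // Step 1: DecoratorFixup
//     -> PreRuntimeDispatch::value_copy<expanded>(...)        // Step 3: AS_RAW hardwires Raw
//       -> RuntimeDispatch<..., BARRIER_VALUE_COPY>           // Step 4: resolved func ptr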
372
502 };
503
504 template <DecoratorSet decorators, typename T>
505 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
506 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
507 static func_t _atomic_xchg_at_func;
508
509 static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
510
511 static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
512 assert_access_thread_state();
513 return _atomic_xchg_at_func(base, offset, new_value);
514 }
515 };
516
517 template <DecoratorSet decorators, typename T>
518 struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
519 typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
520 static func_t _arraycopy_func;
521
522 static void arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
523 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
524 size_t length);
525
526 static inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
527 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
528 size_t length) {
529 assert_access_thread_state();
530 return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
531 dst_obj, dst_offset_in_bytes, dst_raw,
532 length);
533 }
534 };
535
536 template <DecoratorSet decorators, typename T>
537 struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
538 typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
539 static func_t _clone_func;
540
541 static void clone_init(oop src, oop dst, size_t size);
542
543 static inline void clone(oop src, oop dst, size_t size) {
544 assert_access_thread_state();
545 _clone_func(src, dst, size);
546 }
547 };
548
549 template <DecoratorSet decorators, typename T>
550 struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
551 typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
552 static func_t _value_copy_func;
553
554 static void value_copy_init(void* src, void* dst, InlineKlass* md);
555
556 static inline void value_copy(void* src, void* dst, InlineKlass* md) {
557 _value_copy_func(src, dst, md);
558 }
559 };
560
561 // Initialize the function pointers to point to the resolving function.
562 template <DecoratorSet decorators, typename T>
563 typename AccessFunction<decorators, T, BARRIER_STORE>::type
564 RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
565
566 template <DecoratorSet decorators, typename T>
567 typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
568 RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
569
570 template <DecoratorSet decorators, typename T>
571 typename AccessFunction<decorators, T, BARRIER_LOAD>::type
572 RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
573
574 template <DecoratorSet decorators, typename T>
575 typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
576 RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
577
578 template <DecoratorSet decorators, typename T>
579 typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
580 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
581
582 template <DecoratorSet decorators, typename T>
583 typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
584 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
585
586 template <DecoratorSet decorators, typename T>
587 typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
588 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
589
590 template <DecoratorSet decorators, typename T>
591 typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
592 RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
593
594 template <DecoratorSet decorators, typename T>
595 typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
596 RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
597
598 template <DecoratorSet decorators, typename T>
599 typename AccessFunction<decorators, T, BARRIER_CLONE>::type
600 RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
601
602 template <DecoratorSet decorators, typename T>
603 typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
604 RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;
605
606 // Step 3: Pre-runtime dispatching.
607 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
608 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
609 // dispatch point. Otherwise, when hardwiring is not possible, the access falls
610 // through to the Step 4 runtime dispatch.
611 struct PreRuntimeDispatch: AllStatic {
612 template<DecoratorSet decorators>
613 struct CanHardwireRaw: public std::integral_constant<
614 bool,
615 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
616 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
617 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
618 {};
619
620 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
621
622 template<DecoratorSet decorators>
623 static bool is_hardwired_primitive() {
624 return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
625 }
833 inline static typename EnableIf<
834 HasDecorator<decorators, AS_RAW>::value, T>::type
835 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
836 return atomic_xchg<decorators>(field_addr(base, offset), new_value);
837 }
838
839 template <DecoratorSet decorators, typename T>
840 inline static typename EnableIf<
841 !HasDecorator<decorators, AS_RAW>::value, T>::type
842 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
843 if (is_hardwired_primitive<decorators>()) {
844 const DecoratorSet expanded_decorators = decorators | AS_RAW;
845 return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_value);
846 } else {
847 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
848 }
849 }
850
851 template <DecoratorSet decorators, typename T>
852 inline static typename EnableIf<
853 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, void>::type
854 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
855 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
856 size_t length) {
857 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
858 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
859 Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
860 dst_obj, dst_offset_in_bytes, dst_raw,
861 length);
862 } else {
863 Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
864 dst_obj, dst_offset_in_bytes, dst_raw,
865 length);
866 }
867 }
868
869 template <DecoratorSet decorators, typename T>
870 inline static typename EnableIf<
871 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, void>::type
872 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
873 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
874 size_t length) {
875 if (UseCompressedOops) {
876 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
877 PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
878 dst_obj, dst_offset_in_bytes, dst_raw,
879 length);
880 } else {
881 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
882 PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
883 dst_obj, dst_offset_in_bytes, dst_raw,
884 length);
885 }
886 }
887
888 template <DecoratorSet decorators, typename T>
889 inline static typename EnableIf<
890 !HasDecorator<decorators, AS_RAW>::value, void>::type
891 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
892 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
893 size_t length) {
894 if (is_hardwired_primitive<decorators>()) {
895 const DecoratorSet expanded_decorators = decorators | AS_RAW;
896 PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
897 dst_obj, dst_offset_in_bytes, dst_raw,
898 length);
899 } else {
900 RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
901 dst_obj, dst_offset_in_bytes, dst_raw,
902 length);
903 }
904 }
905
906 template <DecoratorSet decorators>
907 inline static typename EnableIf<
908 HasDecorator<decorators, AS_RAW>::value>::type
909 clone(oop src, oop dst, size_t size) {
910 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
911 Raw::clone(src, dst, size);
912 }
913
914 template <DecoratorSet decorators>
915 inline static typename EnableIf<
916 !HasDecorator<decorators, AS_RAW>::value>::type
917 clone(oop src, oop dst, size_t size) {
918 RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
919 }
920
921 template <DecoratorSet decorators>
922 inline static typename EnableIf<
923 HasDecorator<decorators, AS_RAW>::value>::type
924 value_copy(void* src, void* dst, InlineKlass* md) {
925 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
926 Raw::value_copy(src, dst, md);
927 }
928
929 template <DecoratorSet decorators>
930 inline static typename EnableIf<
931 !HasDecorator<decorators, AS_RAW>::value>::type
932 value_copy(void* src, void* dst, InlineKlass* md) {
933 const DecoratorSet expanded_decorators = decorators;
934 RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst, md);
935 }
936 };
937
938 // Step 2: Reduce types.
939 // Enforce that for non-oop types, T and P have to be strictly the same.
940 // P is the type of the address and T is the type of the values.
941 // As for oop types, it is allowed to send T in {narrowOop, oop} and
942 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
943 // the subsequent table. (columns are P, rows are T)
944 // | | HeapWord | oop | narrowOop |
945 // | oop | rt-comp | hw-none | hw-comp |
946 // | narrowOop | x | x | hw-none |
947 //
948 // x means not allowed
949 // rt-comp means it must be checked at runtime whether the oop is compressed.
950 // hw-none means it is statically known the oop will not be compressed.
951 // hw-comp means it is statically known the oop will be compressed.
952
953 template <DecoratorSet decorators, typename T>
954 inline void store_reduce_types(T* addr, T value) {
955 PreRuntimeDispatch::store<decorators>(addr, value);
1030
1031 template <DecoratorSet decorators, typename T>
1032 inline T load_reduce_types(T* addr) {
1033 return PreRuntimeDispatch::load<decorators, T>(addr);
1034 }
1035
1036 template <DecoratorSet decorators, typename T>
1037 inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
1038 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1039 INTERNAL_RT_USE_COMPRESSED_OOPS;
1040 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
1041 }
1042
1043 template <DecoratorSet decorators, typename T>
1044 inline oop load_reduce_types(HeapWord* addr) {
1045 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1046 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1047 }
1048
1049 template <DecoratorSet decorators, typename T>
1050 inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
1051 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1052 size_t length) {
1053 PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
1054 dst_obj, dst_offset_in_bytes, dst_raw,
1055 length);
1056 }
1057
1058 template <DecoratorSet decorators>
1059 inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1060 arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1061 size_t length) {
1062 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1063 PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1064 dst_obj, dst_offset_in_bytes, dst_raw,
1065 length);
1066 }
1067
1068 template <DecoratorSet decorators>
1069 inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1070 arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1071 size_t length) {
1072 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1073 INTERNAL_RT_USE_COMPRESSED_OOPS;
1074 PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1075 dst_obj, dst_offset_in_bytes, dst_raw,
1076 length);
1077 }
1078
1079 // Step 1: Set default decorators. This step remembers if a type was volatile
1080 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1081 // memory ordering is set for the access, and the implied decorator rules
1082 // are applied to select sensible defaults for decorators that have not been
1083 // explicitly set. For example, default object referent strength is set to strong.
1084 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
1085 // and references from the types). This step also performs some type verification
1086 // that the passed in types make sense.
1087
1088 template <DecoratorSet decorators, typename T>
1089 static void verify_types(){
1090 // If this fails to compile, then you have sent in something that is
1091 // not recognized as a valid primitive type to a primitive Access function.
1092 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
1093 (std::is_pointer<T>::value || std::is_integral<T>::value) ||
1094 std::is_floating_point<T>::value)); // not allowed primitive type
1095 }
1096
1189 DecayedT new_decayed_value = new_value;
1190 // atomic_xchg is only available in SEQ_CST flavour.
1191 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1192 return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1193 new_decayed_value);
1194 }
1195
1196 template <DecoratorSet decorators, typename T>
1197 inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
1198 verify_types<decorators, T>();
1199 using DecayedT = std::decay_t<T>;
1200 DecayedT new_decayed_value = new_value;
1201 // atomic_xchg is only available in SEQ_CST flavour.
1202 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
1203 (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1204 INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
1205 return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
1206 }
1207
1208 template <DecoratorSet decorators, typename T>
1209 inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1210 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1211 size_t length) {
1212 STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1213 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1214 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1215 using DecayedT = std::decay_t<T>;
1216 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1217 arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1218 dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1219 length);
1220 }
1221
1222 template <DecoratorSet decorators>
1223 inline void clone(oop src, oop dst, size_t size) {
1224 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1225 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1226 }
1227
1228 template <DecoratorSet decorators>
1229 inline void value_copy(void* src, void* dst, InlineKlass* md) {
1230 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1231 PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst, md);
1232 }
1233
1234 // Infer the type that should be returned from an Access::oop_load.
1235 template <typename P, DecoratorSet decorators>
1236 class OopLoadProxy: public StackObj {
1237 private:
1238 P *const _addr;
1239 public:
1240 explicit OopLoadProxy(P* addr) : _addr(addr) {}
1241
1242 inline operator oop() {
1243 return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1244 }
1245
1246 inline operator narrowOop() {
1247 return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1248 }
1249
1250 template <typename T>
1251 inline bool operator ==(const T& other) const {
1252 return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1253 }