28 #include "gc/shared/barrierSetConfig.hpp"
29 #include "memory/allocation.hpp"
30 #include "metaprogramming/enableIf.hpp"
31 #include "oops/accessDecorators.hpp"
32 #include "oops/oopsHierarchy.hpp"
33 #include "runtime/globals.hpp"
34 #include "utilities/debug.hpp"
35 #include "utilities/globalDefinitions.hpp"
36
37 #include <type_traits>
38
39 // This metafunction returns either oop or narrowOop depending on whether
40 // an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compression is needed only when the access both converts compressed oops
  // (INTERNAL_CONVERT_COMPRESSED_OOP) and the runtime actually uses them
  // (INTERNAL_RT_USE_COMPRESSED_OOPS).
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
47
48 namespace AccessInternal {
  // The kinds of barriered access operations that can be runtime-dispatched.
  // The *_AT variants address a field as (base oop, byte offset) so barriers
  // can use the base object; the others take a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE
  };

  // True iff the access passes values as oop (T == oop) while the heap
  // representation for these decorators is narrowOop, so values must be
  // encoded/decoded across the access.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };
76
77 template <DecoratorSet decorators>
78 inline typename HeapOopType<decorators>::type*
79 oop_field_addr(oop base, ptrdiff_t byte_offset) {
85 // locking to support wide atomics or not.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  // Native wide (8-byte) compare-exchange available: no access needs locking.
  struct PossiblyLockedAccess: public std::false_type {};
#else
  // No native 8-byte CX: accesses wider than 4 bytes may need lock-based
  // emulation (see AccessLocker / wide_atomic_needs_locking below).
  struct PossiblyLockedAccess: public std::integral_constant<bool, (sizeof(T) > 4)> {};
#endif
92
  // Function pointer signatures for each barriered access operation,
  // parameterized over the decorator set and the value type T.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    // Field-addressed accesses: (base oop, byte offset).
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    // Raw-addressed accesses.
    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
  };

  // Type-erased accesses (T = void) only support arraycopy.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                     size_t length);
  };
117
  // Selects the function pointer type matching a BarrierType; each
  // specialization below picks the corresponding typedef from
  // AccessFunctionTypes.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolver entry points installed as the initial dispatch function
  // pointers (see the RuntimeDispatch initializers below); definitions live
  // out of line.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // RAII lock used for lock-based emulation of wide atomics on platforms
  // without native 8-byte compare-exchange.
  class AccessLocker {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  // Raw address of the field at (base, offset).
  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
314 static inline void store(void* addr, T value) {
315 store_internal<decorators>(addr, value);
316 }
317
  // Raw load of T from addr.
  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  // Raw compare-exchange; may fall back to lock-based emulation for wide
  // types (see atomic_cmpxchg_maybe_locked).
  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(addr, compare_value, new_value);
  }

  // Raw exchange; may fall back to lock-based emulation for wide types.
  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_maybe_locked<decorators>(addr, new_value);
  }

  // Raw array copy of T elements (defined out of line).
  template <typename T>
  static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop flavors of the accesses above (definitions out of line).
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
359 static void store_at(oop base, ptrdiff_t offset, T value) {
360 store(field_addr(base, offset), value);
361 }
362
  // Field-addressed variants: compute the raw address from (base, offset)
  // and forward to the raw-address accessors above.
  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  // oop array copy (defined out of line).
  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  // Copy of an object's contents from src to dst (defined out of line).
  static void clone(oop src, oop dst, size_t size);
384 };
385
namespace AccessInternal {
  // Debug-only check that the current thread's state permits an access
  // (defined in the .cpp file); compiles away in product builds.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
390
391 // Below is the implementation of the first 4 steps of the template pipeline:
392 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
393 // and sets default decorators to sensible values.
394 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
395 // multiple types. The P type of the address and T type of the value must
396 // match.
397 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
398 // avoided, and in that case avoids it (calling raw accesses or
399 // primitive accesses in a build that does not require primitive GC barriers)
400 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
401 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
402 // to the access.
403
533 };
534
// Runtime dispatch point for atomic xchg at (base, offset). The function
// pointer initially refers to the *_init resolver (see the initializers
// below); the fast path is a call through the installed pointer.
template <DecoratorSet decorators, typename T>
struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
  typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
  static func_t _atomic_xchg_at_func;

  // First-call path: resolves the concrete barrier (defined out of line).
  static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

  static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    assert_access_thread_state();
    return _atomic_xchg_at_func(base, offset, new_value);
  }
};

// Runtime dispatch point for arraycopy.
template <DecoratorSet decorators, typename T>
struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
  typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
  static func_t _arraycopy_func;

  // First-call path: resolves the concrete barrier (defined out of line).
  static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                             arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                             size_t length);

  static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length) {
    assert_access_thread_state();
    return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                           dst_obj, dst_offset_in_bytes, dst_raw,
                           length);
  }
};

// Runtime dispatch point for object clone.
template <DecoratorSet decorators, typename T>
struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
  typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
  static func_t _clone_func;

  // First-call path: resolves the concrete barrier (defined out of line).
  static void clone_init(oop src, oop dst, size_t size);

  static inline void clone(oop src, oop dst, size_t size) {
    assert_access_thread_state();
    _clone_func(src, dst, size);
  }
};
579
// Initialize the function pointers to point to the resolving function.
// The first access through each dispatch point therefore runs the *_init
// resolver, which is expected to install the resolved barrier function
// (see the out-of-line definitions); later accesses go straight through.
template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_STORE>::type
RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_LOAD>::type
RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_CLONE>::type
RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
620
621 // Step 3: Pre-runtime dispatching.
622 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
623 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
624 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
625 // not possible.
626 struct PreRuntimeDispatch: AllStatic {
  // Whether a raw access can be hardwired statically, with no runtime
  // UseCompressedOops check: true for primitive accesses, for oop* addresses
  // (never compressed), or when compressed-oop use is statically known.
  template<DecoratorSet decorators>
  struct CanHardwireRaw: public std::integral_constant<
    bool,
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
    !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
    HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
  {};

  // Decorator pair that expresses "compressed-oop conversion resolved".
  static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

  // Primitive (non-oop) accesses can skip runtime dispatch entirely.
  template<DecoratorSet decorators>
  static bool is_hardwired_primitive() {
    return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
  }
848 inline static typename EnableIf<
849 HasDecorator<decorators, AS_RAW>::value, T>::type
850 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
851 return atomic_xchg<decorators>(field_addr(base, offset), new_value);
852 }
853
  // Non-raw atomic xchg at (base, offset): hardwired primitive accesses add
  // AS_RAW and bypass runtime dispatch; oop accesses go through the
  // runtime-resolved GC barrier.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, T>::type
  atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
    } else {
      return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
    }
  }
865
  // Raw arraycopy, statically hardwirable: go straight to the raw barrier,
  // using the oop flavor when the elements are oops.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
      return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                dst_obj, dst_offset_in_bytes, dst_raw,
                                length);
    } else {
      return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                            dst_obj, dst_offset_in_bytes, dst_raw,
                            length);
    }
  }

  // Raw arraycopy that cannot be hardwired: consult UseCompressedOops at
  // runtime, then re-dispatch with the compression decorators settled so
  // the overload above can hardwire the access.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    if (UseCompressedOops) {
      const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    } else {
      const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    }
  }
902
  // Non-raw arraycopy: hardwired primitive accesses add AS_RAW and bypass
  // runtime dispatch; otherwise go through the runtime-resolved GC barrier.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, bool>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                dst_obj, dst_offset_in_bytes, dst_raw,
                                                                length);
    } else {
      return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                          dst_obj, dst_offset_in_bytes, dst_raw,
                                                                          length);
    }
  }
920
  // Raw clone: go straight to the raw barrier.
  template <DecoratorSet decorators>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value>::type
  clone(oop src, oop dst, size_t size) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    Raw::clone(src, dst, size);
  }

  // Non-raw clone: go through the runtime-resolved GC barrier.
  template <DecoratorSet decorators>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value>::type
  clone(oop src, oop dst, size_t size) {
    RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
  }
935 };
936
937 // Step 2: Reduce types.
938 // Enforce that for non-oop types, T and P have to be strictly the same.
939 // P is the type of the address and T is the type of the values.
// As for oop types, it is allowed to send T in {narrowOop, oop} and
941 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
942 // the subsequent table. (columns are P, rows are T)
943 // | | HeapWord | oop | narrowOop |
944 // | oop | rt-comp | hw-none | hw-comp |
945 // | narrowOop | x | x | hw-none |
946 //
947 // x means not allowed
948 // rt-comp means it must be checked at runtime whether the oop is compressed.
949 // hw-none means it is statically known the oop will not be compressed.
950 // hw-comp means it is statically known the oop will be compressed.
951
952 template <DecoratorSet decorators, typename T>
953 inline void store_reduce_types(T* addr, T value) {
954 PreRuntimeDispatch::store<decorators>(addr, value);
1029
// T* address with T value: types already agree; dispatch directly.
template <DecoratorSet decorators, typename T>
inline T load_reduce_types(T* addr) {
  return PreRuntimeDispatch::load<decorators, T>(addr);
}

// narrowOop* address: the oop is statically known to be compressed
// ("hw-comp" in the table above), so settle both compression decorators.
template <DecoratorSet decorators, typename T>
inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
}

// HeapWord* address: whether the oop is compressed must be determined at
// runtime ("rt-comp"), so only request conversion here.
template <DecoratorSet decorators, typename T>
inline oop load_reduce_types(HeapWord* addr) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
}
1047
// T* addresses with T elements: types already agree; dispatch directly.
template <DecoratorSet decorators, typename T>
inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                   arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                   size_t length) {
  return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}

// HeapWord* addresses: compression is resolved at runtime ("rt-comp").
template <DecoratorSet decorators>
inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
                                   arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
                                   size_t length) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                            dst_obj, dst_offset_in_bytes, dst_raw,
                                                            length);
}

// narrowOop* addresses: statically known compressed ("hw-comp").
template <DecoratorSet decorators>
inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
                                   arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
                                   size_t length) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                            dst_obj, dst_offset_in_bytes, dst_raw,
                                                            length);
}
1077
1078 // Step 1: Set default decorators. This step remembers if a type was volatile
1079 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1080 // memory ordering is set for the access, and the implied decorator rules
1081 // are applied to select sensible defaults for decorators that have not been
1082 // explicitly set. For example, default object referent strength is set to strong.
1083 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
// and references from the types). This step also performs some type verification
1085 // that the passed in types make sense.
1086
// Compile-time verification that T is a valid type for a primitive access
// (oop accesses are validated separately).
template <DecoratorSet decorators, typename T>
static void verify_types(){
  // If this fails to compile, then you have sent in something that is
  // not recognized as a valid primitive type to a primitive Access function.
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                 (std::is_pointer<T>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // not allowed primitive type
}
1095
1188 DecayedT new_decayed_value = new_value;
1189 // atomic_xchg is only available in SEQ_CST flavour.
1190 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1191 return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1192 new_decayed_value);
1193 }
1194
// Step 1 entry point for atomic xchg at (base, offset): verify and decay the
// value type, fix up decorators (requesting compressed-oop conversion for
// oop values), then hand off to pre-runtime dispatch.
template <DecoratorSet decorators, typename T>
inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
}
1206
// Step 1 entry point for arraycopy: statically validate the element type
// (void is allowed for type-erased copies), decay it, mark the access as an
// in-heap array access, then reduce the address/value types.
template <DecoratorSet decorators, typename T>
inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                      size_t length) {
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
  using DecayedT = std::decay_t<T>;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
  return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                     dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                     length);
}
1220
1221 template <DecoratorSet decorators>
1222 inline void clone(oop src, oop dst, size_t size) {
1223 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1224 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1225 }
1226
1227 // Infer the type that should be returned from an Access::oop_load.
1228 template <typename P, DecoratorSet decorators>
1229 class OopLoadProxy: public StackObj {
 private:
  P *const _addr;  // field address; the load happens on conversion/compare
 public:
  explicit OopLoadProxy(P* addr) : _addr(addr) {}

  // Conversion to oop performs the load with value type oop.
  inline operator oop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
  }

  // Conversion to narrowOop performs the load with value type narrowOop.
  inline operator narrowOop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
  }

  // Comparison loads with the value type of the other operand.
  template <typename T>
  inline bool operator ==(const T& other) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
  }
|
28 #include "gc/shared/barrierSetConfig.hpp"
29 #include "memory/allocation.hpp"
30 #include "metaprogramming/enableIf.hpp"
31 #include "oops/accessDecorators.hpp"
32 #include "oops/oopsHierarchy.hpp"
33 #include "runtime/globals.hpp"
34 #include "utilities/debug.hpp"
35 #include "utilities/globalDefinitions.hpp"
36
37 #include <type_traits>
38
39 // This metafunction returns either oop or narrowOop depending on whether
40 // an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compressed only when the access converts compressed oops AND the
  // runtime actually uses them.
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};

// This meta-function returns either oop or narrowOop depending on whether
// a back-end needs to consider compressed oops types or not. Unlike
// HeapOopType it keys on the runtime decorator alone, regardless of whether
// the access itself converts compressed oops.
template <DecoratorSet decorators>
struct ValueOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
55
56 namespace AccessInternal {
  // The kinds of barriered access operations that can be runtime-dispatched.
  // The *_AT variants address a field as (base oop, byte offset); the others
  // take a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_VALUE_COPY // copy of a flat value object (see value_copy_func_t)
  };

  // True iff the access passes values as oop (T == oop) while the heap
  // representation for these decorators is narrowOop, so values must be
  // encoded/decoded across the access.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };
85
86 template <DecoratorSet decorators>
87 inline typename HeapOopType<decorators>::type*
88 oop_field_addr(oop base, ptrdiff_t byte_offset) {
94 // locking to support wide atomics or not.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  // Native wide (8-byte) compare-exchange available: no access needs locking.
  struct PossiblyLockedAccess: public std::false_type {};
#else
  // No native 8-byte CX: accesses wider than 4 bytes may need lock-based
  // emulation.
  struct PossiblyLockedAccess: public std::integral_constant<bool, (sizeof(T) > 4)> {};
#endif

  // Function pointer signatures for each barriered access operation,
  // parameterized over the decorator set and the value type T.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    // Field-addressed accesses: (base oop, byte offset).
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    // Raw-addressed accesses.
    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    // md presumably describes the flat value layout (InlineKlass) -- confirm
    // against the value_copy barrier implementations.
    typedef void (*value_copy_func_t)(void* src, void* dst, InlineKlass* md);
  };

  // Type-erased accesses (T = void) only support arraycopy.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                     size_t length);
  };
127
  // Selects the function pointer type matching a BarrierType; each
  // specialization below picks the corresponding typedef from
  // AccessFunctionTypes.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolver entry points installed as the initial dispatch function
  // pointers; definitions live out of line.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // RAII lock used for lock-based emulation of wide atomics on platforms
  // without native 8-byte compare-exchange.
  class AccessLocker {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  // Raw address of the field at (base, offset).
  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
325 static inline void store(void* addr, T value) {
326 store_internal<decorators>(addr, value);
327 }
328
  // Raw load of T from addr.
  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  // Raw compare-exchange; may fall back to lock-based emulation for wide
  // types (see atomic_cmpxchg_maybe_locked).
  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(addr, compare_value, new_value);
  }

  // Raw exchange; may fall back to lock-based emulation for wide types.
  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_maybe_locked<decorators>(addr, new_value);
  }

  // Raw array copy of T elements (defined out of line).
  template <typename T>
  static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop flavors of the accesses above (definitions out of line).
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
370 static void store_at(oop base, ptrdiff_t offset, T value) {
371 store(field_addr(base, offset), value);
372 }
373
374 template <typename T>
375 static T load_at(oop base, ptrdiff_t offset) {
376 return load<T>(field_addr(base, offset));
377 }
378
  // Compare-and-exchange on the field at (base + offset).
  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }
383
  // Atomic exchange on the field at (base + offset).
  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }
388
  // Array copy of oop elements; compressed-oop encoding/decoding may apply.
  template <typename T>
  static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  // Copy size (heap) words from src to dst when cloning an object.
  static void clone(oop src, oop dst, size_t size);
  // Copy a flat value described by md from src to dst.
  static void value_copy(void* src, void* dst, InlineKlass* md);
396
397 };
398
namespace AccessInternal {
  // Debug-only check that the calling thread is in a state that may perform
  // accesses; both the declaration and the macro below compile to nothing in
  // product builds.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
403
404 // Below is the implementation of the first 4 steps of the template pipeline:
405 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
406 // and sets default decorators to sensible values.
407 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
408 // multiple types. The P type of the address and T type of the value must
409 // match.
410 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
411 // avoided, and in that case avoids it (calling raw accesses or
412 // primitive accesses in a build that does not require primitive GC barriers)
413 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
414 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
415 // to the access.
416
546 };
547
548 template <DecoratorSet decorators, typename T>
549 struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
550 typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
551 static func_t _atomic_xchg_at_func;
552
553 static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
554
555 static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
556 assert_access_thread_state();
557 return _atomic_xchg_at_func(base, offset, new_value);
558 }
559 };
560
561 template <DecoratorSet decorators, typename T>
562 struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
563 typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
564 static func_t _arraycopy_func;
565
566 static void arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
567 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
568 size_t length);
569
570 static inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
571 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
572 size_t length) {
573 assert_access_thread_state();
574 return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
575 dst_obj, dst_offset_in_bytes, dst_raw,
576 length);
577 }
578 };
579
580 template <DecoratorSet decorators, typename T>
581 struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
582 typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
583 static func_t _clone_func;
584
585 static void clone_init(oop src, oop dst, size_t size);
586
587 static inline void clone(oop src, oop dst, size_t size) {
588 assert_access_thread_state();
589 _clone_func(src, dst, size);
590 }
591 };
592
593 template <DecoratorSet decorators, typename T>
594 struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
595 typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
596 static func_t _value_copy_func;
597
598 static void value_copy_init(void* src, void* dst, InlineKlass* md);
599
600 static inline void value_copy(void* src, void* dst, InlineKlass* md) {
601 _value_copy_func(src, dst, md);
602 }
603 };
604
// Initialize the function pointers to point to the resolving function
// (the *_init member of each RuntimeDispatch specialization above), so the
// first access through a given (decorators, T, barrier) combination goes
// through resolution.
template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_STORE>::type
RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_LOAD>::type
RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;
template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_CLONE>::type
RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

template <DecoratorSet decorators, typename T>
typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;
649
650 // Step 3: Pre-runtime dispatching.
651 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
652 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
653 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
654 // not possible.
655 struct PreRuntimeDispatch: AllStatic {
  // Metafunction: true iff an AS_RAW access with these decorators can be
  // hardwired statically, i.e. either no oop values are involved, or it is
  // statically known whether the oops are compressed.
  template<DecoratorSet decorators>
  struct CanHardwireRaw: public std::integral_constant<
    bool,
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
    !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
    HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
  {};
663
  // Decorator pair that marks an access as converting between oop and
  // narrowOop using compressed oops.
  static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

  // True iff the access does not involve oop values; such accesses are
  // hardwired to raw accesses by the dispatchers below.
  template<DecoratorSet decorators>
  static bool is_hardwired_primitive() {
    return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
  }
877 inline static typename EnableIf<
878 HasDecorator<decorators, AS_RAW>::value, T>::type
879 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
880 return atomic_xchg<decorators>(field_addr(base, offset), new_value);
881 }
882
883 template <DecoratorSet decorators, typename T>
884 inline static typename EnableIf<
885 !HasDecorator<decorators, AS_RAW>::value, T>::type
886 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
887 if (is_hardwired_primitive<decorators>()) {
888 const DecoratorSet expanded_decorators = decorators | AS_RAW;
889 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
890 } else {
891 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
892 }
893 }
894
  // Hardwired raw arraycopy: it is statically known whether the elements are
  // oops and, if so, whether they are compressed (CanHardwireRaw holds), so
  // the raw backend can be called directly.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, void>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
    if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
      // Oop elements: use the oop-aware raw copy.
      Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                         dst_obj, dst_offset_in_bytes, dst_raw,
                         length);
    } else {
      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                     dst_obj, dst_offset_in_bytes, dst_raw,
                     length);
    }
  }
912
  // Raw arraycopy where compressed-oop usage is only known at runtime: branch
  // on UseCompressedOops and re-dispatch with the now statically-resolved
  // compressed-oop decorators (satisfying CanHardwireRaw).
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, void>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    if (UseCompressedOops) {
      const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
      PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                         dst_obj, dst_offset_in_bytes, dst_raw,
                                                         length);
    } else {
      const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
      PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                         dst_obj, dst_offset_in_bytes, dst_raw,
                                                         length);
    }
  }
931
  // Pre-runtime dispatch for arraycopy without AS_RAW: primitive accesses are
  // hardwired to raw accesses; oop accesses go through the runtime dispatch
  // point to attach the required barriers.
  template <DecoratorSet decorators, typename T>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value, void>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    if (is_hardwired_primitive<decorators>()) {
      const DecoratorSet expanded_decorators = decorators | AS_RAW;
      PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                         dst_obj, dst_offset_in_bytes, dst_raw,
                                                         length);
    } else {
      RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                                   length);
    }
  }
949
950 template <DecoratorSet decorators>
951 inline static typename EnableIf<
952 HasDecorator<decorators, AS_RAW>::value>::type
953 clone(oop src, oop dst, size_t size) {
954 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
955 Raw::clone(src, dst, size);
956 }
957
  // Non-raw clone: go through the runtime dispatch point (the value type is
  // fixed to oop for clone).
  template <DecoratorSet decorators>
  inline static typename EnableIf<
    !HasDecorator<decorators, AS_RAW>::value>::type
  clone(oop src, oop dst, size_t size) {
    RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
  }
964
965 template <DecoratorSet decorators>
966 inline static typename EnableIf<
967 HasDecorator<decorators, AS_RAW>::value>::type
968 value_copy(void* src, void* dst, InlineKlass* md) {
969 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
970 Raw::value_copy(src, dst, md);
971 }
972
973 template <DecoratorSet decorators>
974 inline static typename EnableIf<
975 !HasDecorator<decorators, AS_RAW>::value>::type
976 value_copy(void* src, void* dst, InlineKlass* md) {
977 const DecoratorSet expanded_decorators = decorators;
978 RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst, md);
979 }
980 };
981
982 // Step 2: Reduce types.
983 // Enforce that for non-oop types, T and P have to be strictly the same.
984 // P is the type of the address and T is the type of the values.
// As for oop types, it is allowed to send T in {narrowOop, oop} and
986 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
987 // the subsequent table. (columns are P, rows are T)
988 // | | HeapWord | oop | narrowOop |
989 // | oop | rt-comp | hw-none | hw-comp |
990 // | narrowOop | x | x | hw-none |
991 //
992 // x means not allowed
993 // rt-comp means it must be checked at runtime whether the oop is compressed.
994 // hw-none means it is statically known the oop will not be compressed.
995 // hw-comp means it is statically known the oop will be compressed.
996
997 template <DecoratorSet decorators, typename T>
998 inline void store_reduce_types(T* addr, T value) {
999 PreRuntimeDispatch::store<decorators>(addr, value);
1074
// Generic case: T and P agree, no oop compression handling is needed;
// dispatch with the decorators unchanged.
template <DecoratorSet decorators, typename T>
inline T load_reduce_types(T* addr) {
  return PreRuntimeDispatch::load<decorators, T>(addr);
}
1079
// Loading from a narrowOop* location: statically known compressed oop
// (hw-comp in the table above), so add both compressed-oop decorators.
template <DecoratorSet decorators, typename T>
inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
}
1086
// Loading an oop from a HeapWord* location: whether the oop is compressed
// must be resolved at runtime (rt-comp in the table above), so only the
// conversion decorator is added.
template <DecoratorSet decorators, typename T>
inline oop load_reduce_types(HeapWord* addr) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
}
1092
// Generic case: element type needs no oop compression handling; forward the
// decorators unchanged.
template <DecoratorSet decorators, typename T>
inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                   arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                   size_t length) {
  PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                            dst_obj, dst_offset_in_bytes, dst_raw,
                                            length);
}
1101
// HeapWord* elements: whether the oops are compressed must be resolved at
// runtime, so only the conversion decorator is added.
template <DecoratorSet decorators>
inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
                                   arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
                                   size_t length) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
  PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                     dst_obj, dst_offset_in_bytes, dst_raw,
                                                     length);
}
1111
// narrowOop* elements: statically known compressed oops, so add both
// compressed-oop decorators.
template <DecoratorSet decorators>
inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
                                   arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
                                   size_t length) {
  const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS;
  PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                     dst_obj, dst_offset_in_bytes, dst_raw,
                                                     length);
}
1122
1123 // Step 1: Set default decorators. This step remembers if a type was volatile
1124 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1125 // memory ordering is set for the access, and the implied decorator rules
1126 // are applied to select sensible defaults for decorators that have not been
1127 // explicitly set. For example, default object referent strength is set to strong.
1128 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
// and references from the types). This step also performs some type verification
1130 // that the passed in types make sense.
1131
// Compile-time validation that T is a legal value type for a primitive
// Access function (oop values are validated elsewhere in the pipeline).
template <DecoratorSet decorators, typename T>
static void verify_types(){
  // If this fails to compile, then you have sent in something that is
  // not recognized as a valid primitive type to a primitive Access function.
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                 (std::is_pointer<T>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // not allowed primitive type
}
1140
1233 DecayedT new_decayed_value = new_value;
1234 // atomic_xchg is only available in SEQ_CST flavour.
1235 const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1236 return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1237 new_decayed_value);
1238 }
1239
// Step 1/2 entry point for atomic_xchg_at: verify and decay T, fix up the
// decorators, and hand off to the pre-runtime dispatcher.
template <DecoratorSet decorators, typename T>
inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  // Oop values additionally get the compressed-oop conversion decorator, to
  // be resolved later in the pipeline.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
}
1251
// Step 1/2 entry point for arraycopy: validate the element type, decay it,
// fix up the decorators (all arraycopies are array accesses in the heap),
// and hand off to the type-reduction step.
template <DecoratorSet decorators, typename T>
inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                      size_t length) {
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
  using DecayedT = std::decay_t<T>;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
  arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                              dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                              length);
}
1265
// Step 1/2 entry point for clone: fix up decorator defaults, then
// pre-dispatch.
template <DecoratorSet decorators>
inline void clone(oop src, oop dst, size_t size) {
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
  PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
}
1271
// Step 1/2 entry point for value_copy: fix up decorator defaults, then
// pre-dispatch.
template <DecoratorSet decorators>
inline void value_copy(void* src, void* dst, InlineKlass* md) {
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
  PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst, md);
}
1277
1278 // Infer the type that should be returned from an Access::oop_load.
1279 template <typename P, DecoratorSet decorators>
1280 class OopLoadProxy: public StackObj {
1281 private:
1282 P *const _addr;
1283 public:
1284 explicit OopLoadProxy(P* addr) : _addr(addr) {}
1285
  // Implicit conversion to oop: perform the load with an uncompressed oop
  // result type.
  inline operator oop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
  }
1289
  // Implicit conversion to narrowOop: perform the load with a compressed oop
  // result type.
  inline operator narrowOop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
  }
1293
  // Compare the loaded value against another value, letting the comparand's
  // type T select the result type of the load.
  template <typename T>
  inline bool operator ==(const T& other) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
  }
|