
src/hotspot/share/oops/accessBackend.hpp


  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
  26 #define SHARE_OOPS_ACCESSBACKEND_HPP
  27 
  28 #include "cppstdlib/type_traits.hpp"
  29 #include "gc/shared/barrierSetConfig.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "metaprogramming/enableIf.hpp"
  32 #include "oops/accessDecorators.hpp"

  33 #include "oops/oopsHierarchy.hpp"
  34 #include "runtime/globals.hpp"
  35 #include "utilities/debug.hpp"
  36 #include "utilities/globalDefinitions.hpp"
  37 
  38 // Result from oop_arraycopy
  39 enum class OopCopyResult {
  40   ok,                      // oop array copy successful
  41   failed_check_class_cast, // oop array copy failed subtype check (ARRAYCOPY_CHECKCAST)
  42   failed_check_null        // oop array copy failed null check (ARRAYCOPY_NOTNULL)
  43 };
  44 
  45 // This metafunction returns either oop or narrowOop depending on whether
  46 // an access needs to use compressed oops or not.
  47 template <DecoratorSet decorators>
  48 struct HeapOopType: AllStatic {
  49   static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
  50                                          HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  51   using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
  52 };
  53 
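As a compile-time sketch (not part of the header itself), the two cases of HeapOopType can be checked directly; the decorator names are the ones consulted by the metafunction above:

static_assert(std::is_same<HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP |
                                       INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
                           narrowOop>::value,
              "both compression decorators set: compressed heap oops");
static_assert(std::is_same<HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP>::type,
                           oop>::value,
              "runtime compression decorator missing: plain oops");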








  54 namespace AccessInternal {
  55   enum BarrierType {
  56     BARRIER_STORE,
  57     BARRIER_STORE_AT,
  58     BARRIER_LOAD,
  59     BARRIER_LOAD_AT,
  60     BARRIER_ATOMIC_CMPXCHG,
  61     BARRIER_ATOMIC_CMPXCHG_AT,
  62     BARRIER_ATOMIC_XCHG,
  63     BARRIER_ATOMIC_XCHG_AT,
  64     BARRIER_ARRAYCOPY,
  65     BARRIER_CLONE

  66   };
  67 
  68   template <DecoratorSet decorators, typename T>
  69   struct MustConvertCompressedOop: public std::integral_constant<bool,
  70     HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
  71     std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
  72     std::is_same<T, oop>::value> {};
  73 
  74   // This metafunction returns an appropriate oop type if the value is oop-like
  75   // and otherwise returns the same type T.
  76   template <DecoratorSet decorators, typename T>
  77   struct EncodedType: AllStatic {
  78     using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
  79                                     typename HeapOopType<decorators>::type,
  80                                     T>;
  81   };
  82 
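A minimal sketch of how EncodedType resolves (illustrative only; MO_RELAXED and jint stand in for an arbitrary non-oop access):

static_assert(std::is_same<EncodedType<INTERNAL_VALUE_IS_OOP |
                                       INTERNAL_CONVERT_COMPRESSED_OOP |
                                       INTERNAL_RT_USE_COMPRESSED_OOPS, oop>::type,
                           narrowOop>::value,
              "oop values are encoded according to HeapOopType");
static_assert(std::is_same<EncodedType<MO_RELAXED, jint>::type, jint>::value,
              "non-oop values pass through unchanged");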
  83   template <DecoratorSet decorators>
  84   inline typename HeapOopType<decorators>::type*
  85   oop_field_addr(oop base, ptrdiff_t byte_offset) {
  86     return reinterpret_cast<typename HeapOopType<decorators>::type*>(
  87              reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  88   }
  89 
  90   template <DecoratorSet decorators, typename T>
  91   struct AccessFunctionTypes {
  92     typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
  93     typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
  94     typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
  95     typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
  96 
  97     typedef T (*load_func_t)(void* addr);
  98     typedef void (*store_func_t)(void* addr, T value);
  99     typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
 100     typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
 101 
 102     typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 103                                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 104                                               size_t length);
 105     typedef void (*clone_func_t)(oop src, oop dst, size_t size);

 106   };
 107 
 108   template <DecoratorSet decorators>
 109   struct AccessFunctionTypes<decorators, void> {
 110     typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
 111                                               arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
 112                                               size_t length);
 113   };
 114 
 115   template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
 116 
 117 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
 118   template <DecoratorSet decorators, typename T>                    \
 119   struct AccessFunction<decorators, T, bt>: AllStatic{              \
 120     typedef typename AccessFunctionTypes<decorators, T>::func type; \
 121   }
 122   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
 123   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
 124   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
 125   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
 126   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
 127   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
 128   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
 129   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
 130   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
 131   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);

 132 #undef ACCESS_GENERATE_ACCESS_FUNCTION
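For reference, each invocation above specializes AccessFunction for one barrier type; for example, ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t) expands to approximately:

  template <DecoratorSet decorators, typename T>
  struct AccessFunction<decorators, T, BARRIER_LOAD>: AllStatic{
    typedef typename AccessFunctionTypes<decorators, T>::load_func_t type;
  };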
 133 
 134   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
 135   typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
 136 
 137   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
 138   typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
 139 
 140   void* field_addr(oop base, ptrdiff_t offset);
 141 
  142   // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  143   // faster build times, given how frequently the access headers are included.
 144   void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
 145   void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
 146   void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
 147 
 148   void arraycopy_disjoint_words(void* src, void* dst, size_t length);
 149   void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
 150 
 151   template<typename T>
 152   void arraycopy_conjoint(T* src, T* dst, size_t length);
 153   template<typename T>
 154   void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
 155   template<typename T>
 156   void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);


 157 }
 158 
 159 // This mask specifies what decorators are relevant for raw accesses. When passing
 160 // accesses to the raw layer, irrelevant decorators are removed.
 161 const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
 162                                         ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
 163 
 164 // The RawAccessBarrier performs raw accesses with additional knowledge of
 165 // memory ordering, so that OrderAccess/Atomic is called when necessary.
 166 // It additionally handles compressed oops, and hence is not completely "raw"
 167 // strictly speaking.
 168 template <DecoratorSet decorators>
 169 class RawAccessBarrier: public AllStatic {
 170 protected:
 171   static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
 172     return AccessInternal::field_addr(base, byte_offset);
 173   }
 174 
 175 protected:
 176   // Only encode if INTERNAL_VALUE_IS_OOP

 276   static inline void store(void* addr, T value) {
 277     store_internal<decorators>(addr, value);
 278   }
 279 
 280   template <typename T>
 281   static inline T load(void* addr) {
 282     return load_internal<decorators, T>(addr);
 283   }
 284 
 285   template <typename T>
 286   static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 287     return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
 288   }
 289 
 290   template <typename T>
 291   static inline T atomic_xchg(void* addr, T new_value) {
 292     return atomic_xchg_internal<decorators>(addr, new_value);
 293   }
 294 
 295   template <typename T>
 296   static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 297                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 298                         size_t length);
 299 
 300   template <typename T>
 301   static void oop_store(void* addr, T value);
 302   template <typename T>
 303   static void oop_store_at(oop base, ptrdiff_t offset, T value);
 304 
 305   template <typename T>
 306   static T oop_load(void* addr);
 307   template <typename T>
 308   static T oop_load_at(oop base, ptrdiff_t offset);
 309 
 310   template <typename T>
 311   static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
 312   template <typename T>
 313   static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
 314 
 315   template <typename T>
 316   static T oop_atomic_xchg(void* addr, T new_value);

 321   static void store_at(oop base, ptrdiff_t offset, T value) {
 322     store(field_addr(base, offset), value);
 323   }
 324 
 325   template <typename T>
 326   static T load_at(oop base, ptrdiff_t offset) {
 327     return load<T>(field_addr(base, offset));
 328   }
 329 
 330   template <typename T>
 331   static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 332     return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
 333   }
 334 
 335   template <typename T>
 336   static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 337     return atomic_xchg(field_addr(base, offset), new_value);
 338   }
 339 
 340   template <typename T>
 341   static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 342                             arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 343                             size_t length);
 344 
 345   static void clone(oop src, oop dst, size_t size);


 346 };
 347 
 348 namespace AccessInternal {
 349   DEBUG_ONLY(void check_access_thread_state());
 350 #define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
 351 }
 352 
 353 // Below is the implementation of the first 4 steps of the template pipeline:
 354 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
 355 //           and sets default decorators to sensible values.
 356 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
 357 //           multiple types. The P type of the address and T type of the value must
 358 //           match.
 359 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
 360 //           avoided, and in that case avoids it (calling raw accesses or
 361 //           primitive accesses in a build that does not require primitive GC barriers)
 362 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
 363 //           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
 364 //           to the access.
 365 
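To make the pipeline concrete, an access such as the following (HeapAccess is declared in oops/access.hpp; shown here only for illustration)

  jint v = HeapAccess<MO_RELAXED>::load_at(obj, offset);

passes through Step 1 (decay the types, fix up default decorators), Step 2 (reduce the address and value types to a single T), and then either Step 3 hardwires a raw access or Step 4 resolves a BarrierSet::AccessBarrier function pointer at runtime.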

 522       assert_access_thread_state();
 523       return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
 524                              dst_obj, dst_offset_in_bytes, dst_raw,
 525                              length);
 526     }
 527   };
 528 
 529   template <DecoratorSet decorators, typename T>
 530   struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
 531     typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
 532     static func_t _clone_func;
 533 
 534     static void clone_init(oop src, oop dst, size_t size);
 535 
 536     static inline void clone(oop src, oop dst, size_t size) {
 537       assert_access_thread_state();
 538       _clone_func(src, dst, size);
 539     }
 540   };
 541 












 542   // Initialize the function pointers to point to the resolving function.
 543   template <DecoratorSet decorators, typename T>
 544   typename AccessFunction<decorators, T, BARRIER_STORE>::type
 545   RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
 546 
 547   template <DecoratorSet decorators, typename T>
 548   typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
 549   RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
 550 
 551   template <DecoratorSet decorators, typename T>
 552   typename AccessFunction<decorators, T, BARRIER_LOAD>::type
 553   RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
 554 
 555   template <DecoratorSet decorators, typename T>
 556   typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
 557   RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
 558 
 559   template <DecoratorSet decorators, typename T>
 560   typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
 561   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

 563   template <DecoratorSet decorators, typename T>
 564   typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
 565   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
 566 
 567   template <DecoratorSet decorators, typename T>
 568   typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
 569   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
 570 
 571   template <DecoratorSet decorators, typename T>
 572   typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
 573   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
 574 
 575   template <DecoratorSet decorators, typename T>
 576   typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
 577   RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
 578 
 579   template <DecoratorSet decorators, typename T>
 580   typename AccessFunction<decorators, T, BARRIER_CLONE>::type
 581   RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
 582 
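The initializations above implement a lazily self-resolving function pointer: each _xxx_func starts out pointing at the corresponding xxx_init resolver, which looks up the real barrier function, installs it, and forwards the first call. A simplified, stand-alone analogue (hypothetical names, not HotSpot code):

typedef int (*op_func_t)(int);

static int op_init(int x);                // resolver, defined below
static op_func_t _op_func = &op_init;     // starts at the resolver

static int op_resolved(int x) { return x + 1; }

static int op_init(int x) {
  _op_func = &op_resolved;                // install the resolved function once
  return _op_func(x);                     // forward the first call
}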




 583   // Step 3: Pre-runtime dispatching.
 584   // The PreRuntimeDispatch class is responsible for filtering the barrier strength
 585   // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
 586   // dispatch point. Otherwise it goes through a runtime check if hardwiring was
 587   // not possible.
 588   struct PreRuntimeDispatch: AllStatic {
 589     template<DecoratorSet decorators>
 590     struct CanHardwireRaw: public std::integral_constant<
 591       bool,
 592       !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
 593       !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
 594       HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
 595     {};
 596 
 597     static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
 598 
 599     template<DecoratorSet decorators>
 600     static bool is_hardwired_primitive() {
 601       return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
 602     }
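    // Concretely: CanHardwireRaw is false only for an oop-valued access that
    // carries INTERNAL_CONVERT_COMPRESSED_OOP but not
    // INTERNAL_RT_USE_COMPRESSED_OOPS, i.e. an access through an oop base
    // where only the runtime (UseCompressedOops) knows whether the field
    // holds a narrowOop. All primitive accesses, and all accesses whose
    // address type already fixes the encoding, can be hardwired.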

 879         return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 880                                                                             dst_obj, dst_offset_in_bytes, dst_raw,
 881                                                                             length);
 882       }
 883     }
 884 
 885     template <DecoratorSet decorators>
 886     inline static typename EnableIf<
 887       HasDecorator<decorators, AS_RAW>::value>::type
 888     clone(oop src, oop dst, size_t size) {
 889       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 890       Raw::clone(src, dst, size);
 891     }
 892 
 893     template <DecoratorSet decorators>
 894     inline static typename EnableIf<
 895       !HasDecorator<decorators, AS_RAW>::value>::type
 896     clone(oop src, oop dst, size_t size) {
 897       RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
 898     }
















 899   };
 900 
 901   // Step 2: Reduce types.
 902   // Enforce that for non-oop types, T and P have to be strictly the same.
  903   // P is the type of the address and T is the type of the value.
  904   // As for oop types, it is allowed to pass T in {narrowOop, oop} and
 905   // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
 906   // the subsequent table. (columns are P, rows are T)
 907   // |           | HeapWord  |   oop   | narrowOop |
 908   // |   oop     |  rt-comp  | hw-none |  hw-comp  |
 909   // | narrowOop |     x     |    x    |  hw-none  |
 910   //
 911   // x means not allowed
 912   // rt-comp means it must be checked at runtime whether the oop is compressed.
 913   // hw-none means it is statically known the oop will not be compressed.
 914   // hw-comp means it is statically known the oop will be compressed.
 915 
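  // For example, T = oop read through P = narrowOop* is the hw-comp case
  // (the address type proves the oop is stored compressed); T = oop through
  // P = HeapWord* is rt-comp and must consult the runtime to decide between
  // narrowOop and oop storage; T = narrowOop is only meaningful when the
  // address is already a narrowOop*.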
 916   template <DecoratorSet decorators, typename T>
 917   inline void store_reduce_types(T* addr, T value) {
 918     PreRuntimeDispatch::store<decorators>(addr, value);

1171   template <DecoratorSet decorators, typename T>
1172   inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1173                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1174                                  size_t length) {
1175     STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1176                    (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1177                     std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1178     using DecayedT = std::decay_t<T>;
1179     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1180     return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1181                                                        dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1182                                                        length);
1183   }
1184 
1185   template <DecoratorSet decorators>
1186   inline void clone(oop src, oop dst, size_t size) {
1187     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1188     PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1189   }
1190 






1191   // Infer the type that should be returned from an Access::oop_load.
1192   template <typename P, DecoratorSet decorators>
1193   class OopLoadProxy: public StackObj {
1194   private:
1195     P *const _addr;
1196   public:
1197     explicit OopLoadProxy(P* addr) : _addr(addr) {}
1198 
1199     inline operator oop() {
1200       return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1201     }
1202 
1203     inline operator narrowOop() {
1204       return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1205     }
1206 
1207     template <typename T>
1208     inline bool operator ==(const T& other) const {
1209       return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1210     }

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
  26 #define SHARE_OOPS_ACCESSBACKEND_HPP
  27 
  28 #include "cppstdlib/type_traits.hpp"
  29 #include "gc/shared/barrierSetConfig.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "metaprogramming/enableIf.hpp"
  32 #include "oops/accessDecorators.hpp"
  33 #include "oops/inlineKlass.hpp"
  34 #include "oops/oopsHierarchy.hpp"
  35 #include "runtime/globals.hpp"
  36 #include "utilities/debug.hpp"
  37 #include "utilities/globalDefinitions.hpp"
  38 
  39 // Result from oop_arraycopy
  40 enum class OopCopyResult {
  41   ok,                      // oop array copy successful
  42   failed_check_class_cast, // oop array copy failed subtype check (ARRAYCOPY_CHECKCAST)
  43   failed_check_null        // oop array copy failed null check (ARRAYCOPY_NOTNULL)
  44 };
  45 
  46 // This metafunction returns either oop or narrowOop depending on whether
  47 // an access needs to use compressed oops or not.
  48 template <DecoratorSet decorators>
  49 struct HeapOopType: AllStatic {
  50   static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
  51                                          HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  52   using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
  53 };
  54 
  55 // This meta-function returns either oop or narrowOop depending on whether
  56 // a back-end needs to consider compressed oops types or not.
  57 template <DecoratorSet decorators>
  58 struct ValueOopType: AllStatic {
  59   static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  60   using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
  61 };
  62 
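As with HeapOopType, a compile-time sketch of ValueOopType (illustrative only); it consults only the runtime compression decorator:

static_assert(std::is_same<ValueOopType<INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
                           narrowOop>::value,
              "back-end sees narrowOop when the runtime compresses oops");
static_assert(std::is_same<ValueOopType<INTERNAL_CONVERT_COMPRESSED_OOP>::type,
                           oop>::value,
              "without the runtime decorator the back-end sees oop");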
  63 namespace AccessInternal {
  64   enum BarrierType {
  65     BARRIER_STORE,
  66     BARRIER_STORE_AT,
  67     BARRIER_LOAD,
  68     BARRIER_LOAD_AT,
  69     BARRIER_ATOMIC_CMPXCHG,
  70     BARRIER_ATOMIC_CMPXCHG_AT,
  71     BARRIER_ATOMIC_XCHG,
  72     BARRIER_ATOMIC_XCHG_AT,
  73     BARRIER_ARRAYCOPY,
  74     BARRIER_CLONE,
  75     BARRIER_VALUE_COPY
  76   };
  77 
  78   template <DecoratorSet decorators, typename T>
  79   struct MustConvertCompressedOop: public std::integral_constant<bool,
  80     HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
  81     std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
  82     std::is_same<T, oop>::value> {};
  83 
  84   // This metafunction returns an appropriate oop type if the value is oop-like
  85   // and otherwise returns the same type T.
  86   template <DecoratorSet decorators, typename T>
  87   struct EncodedType: AllStatic {
  88     using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
  89                                     typename HeapOopType<decorators>::type,
  90                                     T>;
  91   };
  92 
  93   template <DecoratorSet decorators>
  94   inline typename HeapOopType<decorators>::type*
  95   oop_field_addr(oop base, ptrdiff_t byte_offset) {
  96     return reinterpret_cast<typename HeapOopType<decorators>::type*>(
  97              reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  98   }
  99 
 100   template <DecoratorSet decorators, typename T>
 101   struct AccessFunctionTypes {
 102     typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
 103     typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
 104     typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
 105     typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
 106 
 107     typedef T (*load_func_t)(void* addr);
 108     typedef void (*store_func_t)(void* addr, T value);
 109     typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
 110     typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
 111 
 112     typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 113                                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 114                                               size_t length);
 115     typedef void (*clone_func_t)(oop src, oop dst, size_t size);
 116     typedef void (*value_copy_func_t)(void* src, void* dst, InlineKlass* md, LayoutKind lk);
 117   };
 118 
 119   template <DecoratorSet decorators>
 120   struct AccessFunctionTypes<decorators, void> {
 121     typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
 122                                               arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
 123                                               size_t length);
 124   };
 125 
 126   template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
 127 
 128 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
 129   template <DecoratorSet decorators, typename T>                    \
 130   struct AccessFunction<decorators, T, bt>: AllStatic{              \
 131     typedef typename AccessFunctionTypes<decorators, T>::func type; \
 132   }
 133   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
 134   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
 135   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
 136   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
 137   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
 138   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
 139   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
 140   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
 141   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
 142   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
 143   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
 144 #undef ACCESS_GENERATE_ACCESS_FUNCTION
 145 
 146   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
 147   typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
 148 
 149   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
 150   typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
 151 
 152   void* field_addr(oop base, ptrdiff_t offset);
 153 
  154   // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  155   // faster build times, given how frequently the access headers are included.
 156   void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
 157   void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
 158   void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
 159 
 160   void arraycopy_disjoint_words(void* src, void* dst, size_t length);
 161   void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
 162 
 163   template<typename T>
 164   void arraycopy_conjoint(T* src, T* dst, size_t length);
 165   template<typename T>
 166   void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
 167   template<typename T>
 168   void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
 169 
 170   void value_copy_internal(void* src, void* dst, size_t length);
 171 }
 172 
 173 // This mask specifies what decorators are relevant for raw accesses. When passing
 174 // accesses to the raw layer, irrelevant decorators are removed.
 175 const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
 176                                         ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
 177 
 178 // The RawAccessBarrier performs raw accesses with additional knowledge of
 179 // memory ordering, so that OrderAccess/Atomic is called when necessary.
 180 // It additionally handles compressed oops, and hence is not completely "raw"
 181 // strictly speaking.
 182 template <DecoratorSet decorators>
 183 class RawAccessBarrier: public AllStatic {
 184 protected:
 185   static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
 186     return AccessInternal::field_addr(base, byte_offset);
 187   }
 188 
 189 protected:
 190   // Only encode if INTERNAL_VALUE_IS_OOP

 290   static inline void store(void* addr, T value) {
 291     store_internal<decorators>(addr, value);
 292   }
 293 
 294   template <typename T>
 295   static inline T load(void* addr) {
 296     return load_internal<decorators, T>(addr);
 297   }
 298 
 299   template <typename T>
 300   static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 301     return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
 302   }
 303 
 304   template <typename T>
 305   static inline T atomic_xchg(void* addr, T new_value) {
 306     return atomic_xchg_internal<decorators>(addr, new_value);
 307   }
 308 
 309   template <typename T>
 310   static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 311                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 312                         size_t length);
 313 
 314   template <typename T>
 315   static void oop_store(void* addr, T value);
 316   template <typename T>
 317   static void oop_store_at(oop base, ptrdiff_t offset, T value);
 318 
 319   template <typename T>
 320   static T oop_load(void* addr);
 321   template <typename T>
 322   static T oop_load_at(oop base, ptrdiff_t offset);
 323 
 324   template <typename T>
 325   static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
 326   template <typename T>
 327   static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
 328 
 329   template <typename T>
 330   static T oop_atomic_xchg(void* addr, T new_value);

 335   static void store_at(oop base, ptrdiff_t offset, T value) {
 336     store(field_addr(base, offset), value);
 337   }
 338 
 339   template <typename T>
 340   static T load_at(oop base, ptrdiff_t offset) {
 341     return load<T>(field_addr(base, offset));
 342   }
 343 
 344   template <typename T>
 345   static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 346     return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
 347   }
 348 
 349   template <typename T>
 350   static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 351     return atomic_xchg(field_addr(base, offset), new_value);
 352   }
 353 
 354   template <typename T>
 355   static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 356                             arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 357                             size_t length);
 358 
 359   static void clone(oop src, oop dst, size_t size);
 360   static void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk);
 361 
 362 };
 363 
 364 namespace AccessInternal {
 365   DEBUG_ONLY(void check_access_thread_state());
 366 #define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
 367 }
 368 
 369 // Below is the implementation of the first 4 steps of the template pipeline:
 370 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
 371 //           and sets default decorators to sensible values.
 372 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
 373 //           multiple types. The P type of the address and T type of the value must
 374 //           match.
 375 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
 376 //           avoided, and in that case avoids it (calling raw accesses or
 377 //           primitive accesses in a build that does not require primitive GC barriers)
 378 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
 379 //           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
 380 //           to the access.
 381 

 538       assert_access_thread_state();
 539       return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
 540                              dst_obj, dst_offset_in_bytes, dst_raw,
 541                              length);
 542     }
 543   };
 544 
 545   template <DecoratorSet decorators, typename T>
 546   struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
 547     typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
 548     static func_t _clone_func;
 549 
 550     static void clone_init(oop src, oop dst, size_t size);
 551 
 552     static inline void clone(oop src, oop dst, size_t size) {
 553       assert_access_thread_state();
 554       _clone_func(src, dst, size);
 555     }
 556   };
 557 
 558   template <DecoratorSet decorators, typename T>
 559   struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
 560     typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
 561     static func_t _value_copy_func;
 562 
 563     static void value_copy_init(void* src, void* dst, InlineKlass* md, LayoutKind lk);
 564 
 565     static inline void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
 566       _value_copy_func(src, dst, md, lk);
 567     }
 568   };
 569 
 570   // Initialize the function pointers to point to the resolving function.
 571   template <DecoratorSet decorators, typename T>
 572   typename AccessFunction<decorators, T, BARRIER_STORE>::type
 573   RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
 574 
 575   template <DecoratorSet decorators, typename T>
 576   typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
 577   RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
 578 
 579   template <DecoratorSet decorators, typename T>
 580   typename AccessFunction<decorators, T, BARRIER_LOAD>::type
 581   RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
 582 
 583   template <DecoratorSet decorators, typename T>
 584   typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
 585   RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
 586 
 587   template <DecoratorSet decorators, typename T>
 588   typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
 589   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

 591   template <DecoratorSet decorators, typename T>
 592   typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
 593   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
 594 
 595   template <DecoratorSet decorators, typename T>
 596   typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
 597   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
 598 
 599   template <DecoratorSet decorators, typename T>
 600   typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
 601   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
 602 
 603   template <DecoratorSet decorators, typename T>
 604   typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
 605   RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
 606 
 607   template <DecoratorSet decorators, typename T>
 608   typename AccessFunction<decorators, T, BARRIER_CLONE>::type
 609   RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
 610 
 611   template <DecoratorSet decorators, typename T>
 612   typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
 613   RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;
 614 
 615   // Step 3: Pre-runtime dispatching.
 616   // The PreRuntimeDispatch class is responsible for filtering the barrier strength
 617   // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
 618   // dispatch point. Otherwise it goes through a runtime check if hardwiring was
 619   // not possible.
 620   struct PreRuntimeDispatch: AllStatic {
 621     template<DecoratorSet decorators>
 622     struct CanHardwireRaw: public std::integral_constant<
 623       bool,
 624       !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
 625       !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
 626       HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
 627     {};
 628 
 629     static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
 630 
 631     template<DecoratorSet decorators>
 632     static bool is_hardwired_primitive() {
 633       return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
 634     }

 911         return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 912                                                                             dst_obj, dst_offset_in_bytes, dst_raw,
 913                                                                             length);
 914       }
 915     }
 916 
 917     template <DecoratorSet decorators>
 918     inline static typename EnableIf<
 919       HasDecorator<decorators, AS_RAW>::value>::type
 920     clone(oop src, oop dst, size_t size) {
 921       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 922       Raw::clone(src, dst, size);
 923     }
 924 
 925     template <DecoratorSet decorators>
 926     inline static typename EnableIf<
 927       !HasDecorator<decorators, AS_RAW>::value>::type
 928     clone(oop src, oop dst, size_t size) {
 929       RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
 930     }
 931 
 932     template <DecoratorSet decorators>
 933     inline static typename EnableIf<
 934       HasDecorator<decorators, AS_RAW>::value>::type
 935     value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
 936       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 937       Raw::value_copy(src, dst, md, lk);
 938     }
 939 
 940     template <DecoratorSet decorators>
 941     inline static typename EnableIf<
 942       !HasDecorator<decorators, AS_RAW>::value>::type
  943     value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
 944       const DecoratorSet expanded_decorators = decorators;
 945       RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst, md, lk);
 946     }
 947   };
 948 
 949   // Step 2: Reduce types.
 950   // Enforce that for non-oop types, T and P have to be strictly the same.
  951   // P is the type of the address and T is the type of the value.
  952   // As for oop types, it is allowed to pass T in {narrowOop, oop} and
 953   // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
 954   // the subsequent table. (columns are P, rows are T)
 955   // |           | HeapWord  |   oop   | narrowOop |
 956   // |   oop     |  rt-comp  | hw-none |  hw-comp  |
 957   // | narrowOop |     x     |    x    |  hw-none  |
 958   //
 959   // x means not allowed
 960   // rt-comp means it must be checked at runtime whether the oop is compressed.
 961   // hw-none means it is statically known the oop will not be compressed.
 962   // hw-comp means it is statically known the oop will be compressed.
 963 
 964   template <DecoratorSet decorators, typename T>
 965   inline void store_reduce_types(T* addr, T value) {
 966     PreRuntimeDispatch::store<decorators>(addr, value);

1219   template <DecoratorSet decorators, typename T>
1220   inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1221                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1222                                  size_t length) {
1223     STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1224                    (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1225                     std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1226     using DecayedT = std::decay_t<T>;
1227     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1228     return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1229                                                        dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1230                                                        length);
1231   }
1232 
1233   template <DecoratorSet decorators>
1234   inline void clone(oop src, oop dst, size_t size) {
1235     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1236     PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1237   }
1238 
1239   template <DecoratorSet decorators>
1240   inline void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
1241     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1242     PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst, md, lk);
1243   }
1244 
1245   // Infer the type that should be returned from an Access::oop_load.
1246   template <typename P, DecoratorSet decorators>
1247   class OopLoadProxy: public StackObj {
1248   private:
1249     P *const _addr;
1250   public:
1251     explicit OopLoadProxy(P* addr) : _addr(addr) {}
1252 
1253     inline operator oop() {
1254       return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1255     }
1256 
1257     inline operator narrowOop() {
1258       return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1259     }
1260 
1261     template <typename T>
1262     inline bool operator ==(const T& other) const {
1263       return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1264     }
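    // The declared result type selects the conversion: assigning the proxy to
    // an oop invokes operator oop(), assigning to a narrowOop invokes
    // operator narrowOop(), and comparisons go through the templated
    // operator ==, so the caller never has to name the loaded type explicitly.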