src/hotspot/share/oops/accessBackend.hpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
  26 #define SHARE_OOPS_ACCESSBACKEND_HPP
  27 
  28 #include "gc/shared/barrierSetConfig.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "metaprogramming/enableIf.hpp"
  31 #include "oops/accessDecorators.hpp"

  32 #include "oops/oopsHierarchy.hpp"
  33 #include "runtime/globals.hpp"
  34 #include "utilities/debug.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 
  37 #include <type_traits>
  38 
  39 // This metafunction returns either oop or narrowOop depending on whether
  40 // an access needs to use compressed oops or not.
  41 template <DecoratorSet decorators>
  42 struct HeapOopType: AllStatic {
  43   static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
  44                                          HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  45   using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
  46 };
  47 
  48 namespace AccessInternal {
  49   enum BarrierType {
  50     BARRIER_STORE,
  51     BARRIER_STORE_AT,
  52     BARRIER_LOAD,
  53     BARRIER_LOAD_AT,
  54     BARRIER_ATOMIC_CMPXCHG,
  55     BARRIER_ATOMIC_CMPXCHG_AT,
  56     BARRIER_ATOMIC_XCHG,
  57     BARRIER_ATOMIC_XCHG_AT,
  58     BARRIER_ARRAYCOPY,
  59     BARRIER_CLONE

  60   };
  61 
  62   template <DecoratorSet decorators, typename T>
  63   struct MustConvertCompressedOop: public std::integral_constant<bool,
  64     HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
  65     std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
  66     std::is_same<T, oop>::value> {};
  67 
  68   // This metafunction returns an appropriate oop type if the value is oop-like
  69   // and otherwise returns the same type T.
  70   template <DecoratorSet decorators, typename T>
  71   struct EncodedType: AllStatic {
  72     using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
  73                                     typename HeapOopType<decorators>::type,
  74                                     T>;
  75   };
  76 
  77   template <DecoratorSet decorators>
  78   inline typename HeapOopType<decorators>::type*
  79   oop_field_addr(oop base, ptrdiff_t byte_offset) {
  80     return reinterpret_cast<typename HeapOopType<decorators>::type*>(
  81              reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  82   }
  83 
  84   template <DecoratorSet decorators, typename T>
  85   struct AccessFunctionTypes {
  86     typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
  87     typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
  88     typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
  89     typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
  90 
  91     typedef T (*load_func_t)(void* addr);
  92     typedef void (*store_func_t)(void* addr, T value);
  93     typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
  94     typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
  95 
  96     typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
  97                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
  98                                      size_t length);
  99     typedef void (*clone_func_t)(oop src, oop dst, size_t size);

 100   };
 101 
 102   template <DecoratorSet decorators>
 103   struct AccessFunctionTypes<decorators, void> {
 104     typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
 105                                      arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
 106                                      size_t length);
 107   };
 108 
 109   template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
 110 
 111 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
 112   template <DecoratorSet decorators, typename T>                    \
 113   struct AccessFunction<decorators, T, bt>: AllStatic{              \
 114     typedef typename AccessFunctionTypes<decorators, T>::func type; \
 115   }
 116   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
 117   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
 118   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
 119   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
 120   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
 121   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
 122   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
 123   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
 124   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
 125   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);

 126 #undef ACCESS_GENERATE_ACCESS_FUNCTION
 127 
 128   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
 129   typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
 130 
 131   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
 132   typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
 133 
 134   void* field_addr(oop base, ptrdiff_t offset);
 135 
 136   // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
 137   // faster build times, given how frequently this header is included.
 138   void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
 139   void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
 140   void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
 141 
 142   void arraycopy_disjoint_words(void* src, void* dst, size_t length);
 143   void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
 144 
 145   template<typename T>
 146   void arraycopy_conjoint(T* src, T* dst, size_t length);
 147   template<typename T>
 148   void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
 149   template<typename T>
 150   void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);


 151 }
 152 
 153 // This mask specifies what decorators are relevant for raw accesses. When passing
 154 // accesses to the raw layer, irrelevant decorators are removed.
 155 const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
 156                                         ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
 157 
 158 // The RawAccessBarrier performs raw accesses with additional knowledge of
 159 // memory ordering, so that OrderAccess/Atomic is called when necessary.
 160 // It additionally handles compressed oops, and hence is not completely "raw"
 161 // strictly speaking.
 162 template <DecoratorSet decorators>
 163 class RawAccessBarrier: public AllStatic {
 164 protected:
 165   static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
 166     return AccessInternal::field_addr(base, byte_offset);
 167   }
 168 
 169 protected:
 170   // Only encode if INTERNAL_VALUE_IS_OOP

 270   static inline void store(void* addr, T value) {
 271     store_internal<decorators>(addr, value);
 272   }
 273 
 274   template <typename T>
 275   static inline T load(void* addr) {
 276     return load_internal<decorators, T>(addr);
 277   }
 278 
 279   template <typename T>
 280   static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 281     return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
 282   }
 283 
 284   template <typename T>
 285   static inline T atomic_xchg(void* addr, T new_value) {
 286     return atomic_xchg_internal<decorators>(addr, new_value);
 287   }
 288 
 289   template <typename T>
 290   static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 291                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 292                         size_t length);
 293 
 294   template <typename T>
 295   static void oop_store(void* addr, T value);
 296   template <typename T>
 297   static void oop_store_at(oop base, ptrdiff_t offset, T value);
 298 
 299   template <typename T>
 300   static T oop_load(void* addr);
 301   template <typename T>
 302   static T oop_load_at(oop base, ptrdiff_t offset);
 303 
 304   template <typename T>
 305   static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
 306   template <typename T>
 307   static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
 308 
 309   template <typename T>
 310   static T oop_atomic_xchg(void* addr, T new_value);

 315   static void store_at(oop base, ptrdiff_t offset, T value) {
 316     store(field_addr(base, offset), value);
 317   }
 318 
 319   template <typename T>
 320   static T load_at(oop base, ptrdiff_t offset) {
 321     return load<T>(field_addr(base, offset));
 322   }
 323 
 324   template <typename T>
 325   static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 326     return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
 327   }
 328 
 329   template <typename T>
 330   static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 331     return atomic_xchg(field_addr(base, offset), new_value);
 332   }
 333 
 334   template <typename T>
 335   static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 336                             arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 337                             size_t length);
 338 
 339   static void clone(oop src, oop dst, size_t size);


 340 };
 341 
 342 namespace AccessInternal {
 343   DEBUG_ONLY(void check_access_thread_state());
 344 #define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
 345 }
 346 
 347 // Below is the implementation of the first 4 steps of the template pipeline:
 348 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
 349 //           and sets default decorators to sensible values.
 350 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
 351 //           multiple types. The P type of the address and T type of the value must
 352 //           match.
 353 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
 354 //           avoided, and in that case avoids it (calling raw accesses or
 355 //           primitive accesses in a build that does not require primitive GC barriers)
 356 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
 357 //           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
 358 //           to the access.
 359 

 489   };
 490 
 491   template <DecoratorSet decorators, typename T>
 492   struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
 493     typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
 494     static func_t _atomic_xchg_at_func;
 495 
 496     static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
 497 
 498     static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 499       assert_access_thread_state();
 500       return _atomic_xchg_at_func(base, offset, new_value);
 501     }
 502   };
 503 
 504   template <DecoratorSet decorators, typename T>
 505   struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
 506     typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
 507     static func_t _arraycopy_func;
 508 
 509     static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 510                                arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 511                                size_t length);
 512 
 513     static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 514                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 515                                  size_t length) {
 516       assert_access_thread_state();
 517       return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
 518                              dst_obj, dst_offset_in_bytes, dst_raw,
 519                              length);
 520     }
 521   };
 522 
 523   template <DecoratorSet decorators, typename T>
 524   struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
 525     typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
 526     static func_t _clone_func;
 527 
 528     static void clone_init(oop src, oop dst, size_t size);
 529 
 530     static inline void clone(oop src, oop dst, size_t size) {
 531       assert_access_thread_state();
 532       _clone_func(src, dst, size);
 533     }
 534   };
 535 
 536   // Initialize the function pointers to point to the resolving function.
 537   template <DecoratorSet decorators, typename T>
 538   typename AccessFunction<decorators, T, BARRIER_STORE>::type
 539   RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
 540 
 541   template <DecoratorSet decorators, typename T>
 542   typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
 543   RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
 544 
 545   template <DecoratorSet decorators, typename T>
 546   typename AccessFunction<decorators, T, BARRIER_LOAD>::type
 547   RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
 548 
 549   template <DecoratorSet decorators, typename T>
 550   typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
 551   RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
 552 
 553   template <DecoratorSet decorators, typename T>
 554   typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
 555   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

 557   template <DecoratorSet decorators, typename T>
 558   typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
 559   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
 560 
 561   template <DecoratorSet decorators, typename T>
 562   typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
 563   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
 564 
 565   template <DecoratorSet decorators, typename T>
 566   typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
 567   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
 568 
 569   template <DecoratorSet decorators, typename T>
 570   typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
 571   RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
 572 
 573   template <DecoratorSet decorators, typename T>
 574   typename AccessFunction<decorators, T, BARRIER_CLONE>::type
 575   RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
 576 
 577   // Step 3: Pre-runtime dispatching.
 578   // The PreRuntimeDispatch class is responsible for filtering the barrier strength
 579   // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
 580   // dispatch point. Otherwise it goes through a runtime check if hardwiring was
 581   // not possible.
 582   struct PreRuntimeDispatch: AllStatic {
 583     template<DecoratorSet decorators>
 584     struct CanHardwireRaw: public std::integral_constant<
 585       bool,
 586       !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
 587       !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
 588       HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
 589     {};
 590 
 591     static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
 592 
 593     template<DecoratorSet decorators>
 594     static bool is_hardwired_primitive() {
 595       return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
 596     }

 804     inline static typename EnableIf<
 805       HasDecorator<decorators, AS_RAW>::value, T>::type
 806     atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 807       return atomic_xchg<decorators>(field_addr(base, offset), new_value);
 808     }
 809 
 810     template <DecoratorSet decorators, typename T>
 811     inline static typename EnableIf<
 812       !HasDecorator<decorators, AS_RAW>::value, T>::type
 813     atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 814       if (is_hardwired_primitive<decorators>()) {
 815         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 816         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
 817       } else {
 818         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
 819       }
 820     }
 821 
 822     template <DecoratorSet decorators, typename T>
 823     inline static typename EnableIf<
 824       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
 825     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 826               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 827               size_t length) {
 828       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 829       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 830         return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
 831                                   dst_obj, dst_offset_in_bytes, dst_raw,
 832                                   length);
 833       } else {
 834         return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 835                               dst_obj, dst_offset_in_bytes, dst_raw,
 836                               length);
 837       }
 838     }
 839 
 840     template <DecoratorSet decorators, typename T>
 841     inline static typename EnableIf<
 842       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
 843     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 844               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 845               size_t length) {
 846       if (UseCompressedOops) {
 847         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 848         return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 849                                                                   dst_obj, dst_offset_in_bytes, dst_raw,
 850                                                                   length);
 851       } else {
 852         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 853         return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 854                                                                   dst_obj, dst_offset_in_bytes, dst_raw,
 855                                                                   length);
 856       }
 857     }
 858 
 859     template <DecoratorSet decorators, typename T>
 860     inline static typename EnableIf<
 861       !HasDecorator<decorators, AS_RAW>::value, bool>::type
 862     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 863               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 864               size_t length) {
 865       if (is_hardwired_primitive<decorators>()) {
 866         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 867         return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 868                                                                   dst_obj, dst_offset_in_bytes, dst_raw,
 869                                                                   length);
 870       } else {
 871         return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 872                                                                             dst_obj, dst_offset_in_bytes, dst_raw,
 873                                                                             length);
 874       }
 875     }
 876 
 877     template <DecoratorSet decorators>
 878     inline static typename EnableIf<
 879       HasDecorator<decorators, AS_RAW>::value>::type
 880     clone(oop src, oop dst, size_t size) {
 881       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 882       Raw::clone(src, dst, size);
 883     }
 884 
 885     template <DecoratorSet decorators>
 886     inline static typename EnableIf<
 887       !HasDecorator<decorators, AS_RAW>::value>::type
 888     clone(oop src, oop dst, size_t size) {
 889       RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
 890     }
 891   };
 892 
 893   // Step 2: Reduce types.
 894   // Enforce that for non-oop types, T and P have to be strictly the same.
 895   // P is the type of the address and T is the type of the values.
 896   // As for oop types, it is allowed to send T in {narrowOop, oop} and
 897   // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
 898   // the subsequent table. (columns are P, rows are T)
 899   // |           | HeapWord  |   oop   | narrowOop |
 900   // |   oop     |  rt-comp  | hw-none |  hw-comp  |
 901   // | narrowOop |     x     |    x    |  hw-none  |
 902   //
 903   // x means not allowed
 904   // rt-comp means it must be checked at runtime whether the oop is compressed.
 905   // hw-none means it is statically known the oop will not be compressed.
 906   // hw-comp means it is statically known the oop will be compressed.
 907 
 908   template <DecoratorSet decorators, typename T>
 909   inline void store_reduce_types(T* addr, T value) {
 910     PreRuntimeDispatch::store<decorators>(addr, value);

 985 
 986   template <DecoratorSet decorators, typename T>
 987   inline T load_reduce_types(T* addr) {
 988     return PreRuntimeDispatch::load<decorators, T>(addr);
 989   }
 990 
 991   template <DecoratorSet decorators, typename T>
 992   inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
 993     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
 994                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
 995     return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
 996   }
 997 
 998   template <DecoratorSet decorators, typename T>
 999   inline oop load_reduce_types(HeapWord* addr) {
1000     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1001     return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1002   }
1003 
1004   template <DecoratorSet decorators, typename T>
1005   inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
1006                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1007                                      size_t length) {
1008     return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
1009                                                      dst_obj, dst_offset_in_bytes, dst_raw,
1010                                                      length);
1011   }
1012 
1013   template <DecoratorSet decorators>
1014   inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1015                                      arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1016                                      size_t length) {
1017     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1018     return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1019                                                               dst_obj, dst_offset_in_bytes, dst_raw,
1020                                                               length);
1021   }
1022 
1023   template <DecoratorSet decorators>
1024   inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1025                                      arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1026                                      size_t length) {
1027     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1028                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1029     return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1030                                                               dst_obj, dst_offset_in_bytes, dst_raw,
1031                                                               length);
1032   }
1033 
1034   // Step 1: Set default decorators. This step remembers if a type was volatile
1035   // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1036   // memory ordering is set for the access, and the implied decorator rules
1037   // are applied to select sensible defaults for decorators that have not been
1038   // explicitly set. For example, default object referent strength is set to strong.
1039   // This step also decays the types passed in (e.g. getting rid of CV qualifiers
1040   // and references from the types). This step also performs some type verification
1041   // that the passed in types make sense.
1042 
1043   template <DecoratorSet decorators, typename T>
1044   static void verify_types(){
1045     // If this fails to compile, then you have sent in something that is
1046     // not recognized as a valid primitive type to a primitive Access function.
1047     STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
1048                    (std::is_pointer<T>::value || std::is_integral<T>::value) ||
1049                     std::is_floating_point<T>::value)); // not allowed primitive type
1050   }
1051 

1144     DecayedT new_decayed_value = new_value;
1145     // atomic_xchg is only available in SEQ_CST flavour.
1146     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1147     return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1148                                                          new_decayed_value);
1149   }
1150 
1151   template <DecoratorSet decorators, typename T>
1152   inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
1153     verify_types<decorators, T>();
1154     using DecayedT = std::decay_t<T>;
1155     DecayedT new_decayed_value = new_value;
1156     // atomic_xchg is only available in SEQ_CST flavour.
1157     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
1158                                              (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1159                                               INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
1160     return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
1161   }
1162 
1163   template <DecoratorSet decorators, typename T>
1164   inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1165                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1166                         size_t length) {
1167     STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1168                    (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1169                     std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1170     using DecayedT = std::decay_t<T>;
1171     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1172     return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1173                                                        dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1174                                                        length);
1175   }
1176 
1177   template <DecoratorSet decorators>
1178   inline void clone(oop src, oop dst, size_t size) {
1179     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1180     PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1181   }
1182 
1183   // Infer the type that should be returned from an Access::oop_load.
1184   template <typename P, DecoratorSet decorators>
1185   class OopLoadProxy: public StackObj {
1186   private:
1187     P *const _addr;
1188   public:
1189     explicit OopLoadProxy(P* addr) : _addr(addr) {}
1190 
1191     inline operator oop() {
1192       return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1193     }
1194 
1195     inline operator narrowOop() {
1196       return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1197     }
1198 
1199     template <typename T>
1200     inline bool operator ==(const T& other) const {
1201       return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1202     }

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
  26 #define SHARE_OOPS_ACCESSBACKEND_HPP
  27 
  28 #include "gc/shared/barrierSetConfig.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "metaprogramming/enableIf.hpp"
  31 #include "oops/accessDecorators.hpp"
  32 #include "oops/inlineKlass.hpp"
  33 #include "oops/oopsHierarchy.hpp"
  34 #include "runtime/globals.hpp"
  35 #include "utilities/debug.hpp"
  36 #include "utilities/globalDefinitions.hpp"
  37 
  38 #include <type_traits>
  39 
  40 // This metafunction returns either oop or narrowOop depending on whether
  41 // an access needs to use compressed oops or not.
  42 template <DecoratorSet decorators>
  43 struct HeapOopType: AllStatic {
  44   static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
  45                                          HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  46   using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
  47 };
  48 
  49 // This metafunction returns either oop or narrowOop depending on whether
  50 // a backend needs to consider compressed oop types or not.
  51 template <DecoratorSet decorators>
  52 struct ValueOopType: AllStatic {
  53   static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  54   using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
  55 };
  56 
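The two metafunctions differ only in their trigger: HeapOopType compresses only when both INTERNAL_CONVERT_COMPRESSED_OOP and INTERNAL_RT_USE_COMPRESSED_OOPS are set, while ValueOopType keys off the runtime decorator alone. A minimal sketch of how they resolve, written here for illustration only (it uses just the decorator names and the <type_traits> header already pulled in above):

    static_assert(std::is_same<HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP |
                                           INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
                               narrowOop>::value, "both decorators set: compressed");
    static_assert(std::is_same<HeapOopType<INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
                               oop>::value, "no conversion requested: plain oop");
    static_assert(std::is_same<ValueOopType<INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
                               narrowOop>::value, "value type follows the runtime flag alone");
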
  57 namespace AccessInternal {
  58   enum BarrierType {
  59     BARRIER_STORE,
  60     BARRIER_STORE_AT,
  61     BARRIER_LOAD,
  62     BARRIER_LOAD_AT,
  63     BARRIER_ATOMIC_CMPXCHG,
  64     BARRIER_ATOMIC_CMPXCHG_AT,
  65     BARRIER_ATOMIC_XCHG,
  66     BARRIER_ATOMIC_XCHG_AT,
  67     BARRIER_ARRAYCOPY,
  68     BARRIER_CLONE,
  69     BARRIER_VALUE_COPY
  70   };
  71 
  72   template <DecoratorSet decorators, typename T>
  73   struct MustConvertCompressedOop: public std::integral_constant<bool,
  74     HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
  75     std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
  76     std::is_same<T, oop>::value> {};
  77 
  78   // This metafunction returns an appropriate oop type if the value is oop-like
  79   // and otherwise returns the same type T.
  80   template <DecoratorSet decorators, typename T>
  81   struct EncodedType: AllStatic {
  82     using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
  83                                     typename HeapOopType<decorators>::type,
  84                                     T>;
  85   };
  86 
  87   template <DecoratorSet decorators>
  88   inline typename HeapOopType<decorators>::type*
  89   oop_field_addr(oop base, ptrdiff_t byte_offset) {
  90     return reinterpret_cast<typename HeapOopType<decorators>::type*>(
  91              reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  92   }
  93 
  94   template <DecoratorSet decorators, typename T>
  95   struct AccessFunctionTypes {
  96     typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
  97     typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
  98     typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
  99     typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
 100 
 101     typedef T (*load_func_t)(void* addr);
 102     typedef void (*store_func_t)(void* addr, T value);
 103     typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
 104     typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
 105 
 106     typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 107                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 108                                      size_t length);
 109     typedef void (*clone_func_t)(oop src, oop dst, size_t size);
 110     typedef void (*value_copy_func_t)(void* src, void* dst, InlineKlass* md, LayoutKind lk);
 111   };
 112 
 113   template <DecoratorSet decorators>
 114   struct AccessFunctionTypes<decorators, void> {
 115     typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
 116                                      arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
 117                                      size_t length);
 118   };
 119 
 120   template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
 121 
 122 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
 123   template <DecoratorSet decorators, typename T>                    \
 124   struct AccessFunction<decorators, T, bt>: AllStatic{              \
 125     typedef typename AccessFunctionTypes<decorators, T>::func type; \
 126   }
 127   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
 128   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
 129   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
 130   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
 131   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
 132   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
 133   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
 134   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
 135   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
 136   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
 137   ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
 138 #undef ACCESS_GENERATE_ACCESS_FUNCTION
 139 
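As a hedged illustration of what this macro generates (not part of the patch; IN_HEAP and jint are arbitrary placeholders), the AccessFunction selector simply forwards to the matching typedef in AccessFunctionTypes:

    static_assert(std::is_same<
        AccessInternal::AccessFunction<IN_HEAP, jint, AccessInternal::BARRIER_LOAD_AT>::type,
        AccessInternal::AccessFunctionTypes<IN_HEAP, jint>::load_at_func_t>::value,
        "BARRIER_LOAD_AT selects load_at_func_t");
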
 140   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
 141   typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();
 142 
 143   template <DecoratorSet decorators, typename T, BarrierType barrier_type>
 144   typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
 145 
 146   void* field_addr(oop base, ptrdiff_t offset);
 147 
 148   // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
 149   // faster build times, given how frequently this header is included.
 150   void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
 151   void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
 152   void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);
 153 
 154   void arraycopy_disjoint_words(void* src, void* dst, size_t length);
 155   void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);
 156 
 157   template<typename T>
 158   void arraycopy_conjoint(T* src, T* dst, size_t length);
 159   template<typename T>
 160   void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
 161   template<typename T>
 162   void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
 163 
 164   void value_copy_internal(void* src, void* dst, size_t length);
 165 }
 166 
 167 // This mask specifies what decorators are relevant for raw accesses. When passing
 168 // accesses to the raw layer, irrelevant decorators are removed.
 169 const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
 170                                         ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
 171 
 172 // The RawAccessBarrier performs raw accesses with additional knowledge of
 173 // memory ordering, so that OrderAccess/Atomic is called when necessary.
 174 // It additionally handles compressed oops, and hence is not completely "raw"
 175 // strictly speaking.
 176 template <DecoratorSet decorators>
 177 class RawAccessBarrier: public AllStatic {
 178 protected:
 179   static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
 180     return AccessInternal::field_addr(base, byte_offset);
 181   }
 182 
 183 protected:
 184   // Only encode if INTERNAL_VALUE_IS_OOP

 284   static inline void store(void* addr, T value) {
 285     store_internal<decorators>(addr, value);
 286   }
 287 
 288   template <typename T>
 289   static inline T load(void* addr) {
 290     return load_internal<decorators, T>(addr);
 291   }
 292 
 293   template <typename T>
 294   static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 295     return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
 296   }
 297 
 298   template <typename T>
 299   static inline T atomic_xchg(void* addr, T new_value) {
 300     return atomic_xchg_internal<decorators>(addr, new_value);
 301   }
 302 
 303   template <typename T>
 304   static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 305                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 306                         size_t length);
 307 
 308   template <typename T>
 309   static void oop_store(void* addr, T value);
 310   template <typename T>
 311   static void oop_store_at(oop base, ptrdiff_t offset, T value);
 312 
 313   template <typename T>
 314   static T oop_load(void* addr);
 315   template <typename T>
 316   static T oop_load_at(oop base, ptrdiff_t offset);
 317 
 318   template <typename T>
 319   static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
 320   template <typename T>
 321   static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
 322 
 323   template <typename T>
 324   static T oop_atomic_xchg(void* addr, T new_value);

 329   static void store_at(oop base, ptrdiff_t offset, T value) {
 330     store(field_addr(base, offset), value);
 331   }
 332 
 333   template <typename T>
 334   static T load_at(oop base, ptrdiff_t offset) {
 335     return load<T>(field_addr(base, offset));
 336   }
 337 
 338   template <typename T>
 339   static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 340     return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
 341   }
 342 
 343   template <typename T>
 344   static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 345     return atomic_xchg(field_addr(base, offset), new_value);
 346   }
 347 
 348   template <typename T>
 349   static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 350                             arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 351                             size_t length);
 352 
 353   static void clone(oop src, oop dst, size_t size);
 354   static void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk);
 355 
 356 };
 357 
 358 namespace AccessInternal {
 359   DEBUG_ONLY(void check_access_thread_state());
 360 #define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
 361 }
 362 
 363 // Below is the implementation of the first 4 steps of the template pipeline:
 364 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
 365 //           and sets default decorators to sensible values.
 366 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
 367 //           multiple types. The P type of the address and T type of the value must
 368 //           match.
 369 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
 370 //           avoided, and in that case avoids it (calling raw accesses or
 371 //           primitive accesses in a build that does not require primitive GC barriers)
 372 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
 373 //           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
 374 //           to the access.
 375 
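A rough, illustrative trace of how a single heap load threads through these steps (the public entry point HeapAccess lives in access.hpp, not in this file):

    // HeapAccess<MO_RELAXED>::load_at(obj, offset)
    //   Step 1: decay the types, let DecoratorFixup fill in default decorators
    //   Step 2: load_reduce_types picks the oop* / narrowOop* / HeapWord* overload
    //   Step 3: PreRuntimeDispatch::load_at hardwires the access if AS_RAW allows it
    //   Step 4: RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at calls the
    //           resolved BarrierSet::AccessBarrier function
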

 505   };
 506 
 507   template <DecoratorSet decorators, typename T>
 508   struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
 509     typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
 510     static func_t _atomic_xchg_at_func;
 511 
 512     static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
 513 
 514     static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 515       assert_access_thread_state();
 516       return _atomic_xchg_at_func(base, offset, new_value);
 517     }
 518   };
 519 
 520   template <DecoratorSet decorators, typename T>
 521   struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
 522     typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
 523     static func_t _arraycopy_func;
 524 
 525     static void arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 526                                arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 527                                size_t length);
 528 
 529     static inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 530                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 531                                  size_t length) {
 532       assert_access_thread_state();
 533       return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
 534                              dst_obj, dst_offset_in_bytes, dst_raw,
 535                              length);
 536     }
 537   };
 538 
 539   template <DecoratorSet decorators, typename T>
 540   struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
 541     typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
 542     static func_t _clone_func;
 543 
 544     static void clone_init(oop src, oop dst, size_t size);
 545 
 546     static inline void clone(oop src, oop dst, size_t size) {
 547       assert_access_thread_state();
 548       _clone_func(src, dst, size);
 549     }
 550   };
 551 
 552   template <DecoratorSet decorators, typename T>
 553   struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
 554     typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
 555     static func_t _value_copy_func;
 556 
 557     static void value_copy_init(void* src, void* dst, InlineKlass* md, LayoutKind lk);
 558 
 559     static inline void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
 560       _value_copy_func(src, dst, md, lk);
 561     }
 562   };
 563 
 564   // Initialize the function pointers to point to the resolving function.
 565   template <DecoratorSet decorators, typename T>
 566   typename AccessFunction<decorators, T, BARRIER_STORE>::type
 567   RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;
 568 
 569   template <DecoratorSet decorators, typename T>
 570   typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
 571   RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;
 572 
 573   template <DecoratorSet decorators, typename T>
 574   typename AccessFunction<decorators, T, BARRIER_LOAD>::type
 575   RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;
 576 
 577   template <DecoratorSet decorators, typename T>
 578   typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
 579   RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;
 580 
 581   template <DecoratorSet decorators, typename T>
 582   typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
 583   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

 585   template <DecoratorSet decorators, typename T>
 586   typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
 587   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;
 588 
 589   template <DecoratorSet decorators, typename T>
 590   typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
 591   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;
 592 
 593   template <DecoratorSet decorators, typename T>
 594   typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
 595   RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;
 596 
 597   template <DecoratorSet decorators, typename T>
 598   typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
 599   RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;
 600 
 601   template <DecoratorSet decorators, typename T>
 602   typename AccessFunction<decorators, T, BARRIER_CLONE>::type
 603   RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
 604 
 605   template <DecoratorSet decorators, typename T>
 606   typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
 607   RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;
 608 
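These statics implement a lazily resolved function pointer: each one starts out pointing at its *_init resolver (whose definition lives outside this header), which resolves the appropriate barrier once, patches the pointer, and completes the original call. A stripped-down sketch of the idiom, with purely hypothetical names:

    struct ExampleDispatch {
      static int (*_func)(int);
      static int func_init(int x) {
        _func = &ExampleDispatch::resolved;  // resolve once and cache the result
        return _func(x);                     // finish the call that triggered resolution
      }
      static int resolved(int x) { return x + 1; }  // stands in for the resolved barrier
    };
    int (*ExampleDispatch::_func)(int) = &ExampleDispatch::func_init;
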
 609   // Step 3: Pre-runtime dispatching.
 610   // The PreRuntimeDispatch class is responsible for filtering the barrier strength
 611   // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
 612   // dispatch point. Otherwise it goes through a runtime check if hardwiring was
 613   // not possible.
 614   struct PreRuntimeDispatch: AllStatic {
 615     template<DecoratorSet decorators>
 616     struct CanHardwireRaw: public std::integral_constant<
 617       bool,
 618       !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
 619       !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
 620       HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
 621     {};
 622 
 623     static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
 624 
 625     template<DecoratorSet decorators>
 626     static bool is_hardwired_primitive() {
 627       return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
 628     }
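Taken together, the clauses above give a rough, illustrative picture of when the raw path can be hardwired:

    //   primitive access                                   -> hardwire; no oop encoding involved
    //   oop access without INTERNAL_CONVERT_COMPRESSED_OOP -> hardwire; the oop* address needs no conversion
    //   oop access with INTERNAL_RT_USE_COMPRESSED_OOPS    -> hardwire; compression is statically known
    //   otherwise (e.g. a HeapWord* address)               -> no hardwiring; the overloads below test
    //                                                         UseCompressedOops at runtime
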

 836     inline static typename EnableIf<
 837       HasDecorator<decorators, AS_RAW>::value, T>::type
 838     atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 839       return atomic_xchg<decorators>(field_addr(base, offset), new_value);
 840     }
 841 
 842     template <DecoratorSet decorators, typename T>
 843     inline static typename EnableIf<
 844       !HasDecorator<decorators, AS_RAW>::value, T>::type
 845     atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 846       if (is_hardwired_primitive<decorators>()) {
 847         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 848         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
 849       } else {
 850         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
 851       }
 852     }
 853 
 854     template <DecoratorSet decorators, typename T>
 855     inline static typename EnableIf<
 856       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, void>::type
 857     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 858               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 859               size_t length) {
 860       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 861       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 862         Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
 863                            dst_obj, dst_offset_in_bytes, dst_raw,
 864                            length);
 865       } else {
 866         Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 867                        dst_obj, dst_offset_in_bytes, dst_raw,
 868                        length);
 869       }
 870     }
 871 
 872     template <DecoratorSet decorators, typename T>
 873     inline static typename EnableIf<
 874       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, void>::type
 875     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 876               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 877               size_t length) {
 878       if (UseCompressedOops) {
 879         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 880         PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 881                                                            dst_obj, dst_offset_in_bytes, dst_raw,
 882                                                            length);
 883       } else {
 884         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 885         PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 886                                                            dst_obj, dst_offset_in_bytes, dst_raw,
 887                                                            length);
 888       }
 889     }
 890 
 891     template <DecoratorSet decorators, typename T>
 892     inline static typename EnableIf<
 893       !HasDecorator<decorators, AS_RAW>::value, void>::type
 894     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 895               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 896               size_t length) {
 897       if (is_hardwired_primitive<decorators>()) {
 898         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 899         PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 900                                                            dst_obj, dst_offset_in_bytes, dst_raw,
 901                                                            length);
 902       } else {
 903         RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 904                                                                      dst_obj, dst_offset_in_bytes, dst_raw,
 905                                                                      length);
 906       }
 907     }
 908 
 909     template <DecoratorSet decorators>
 910     inline static typename EnableIf<
 911       HasDecorator<decorators, AS_RAW>::value>::type
 912     clone(oop src, oop dst, size_t size) {
 913       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 914       Raw::clone(src, dst, size);
 915     }
 916 
 917     template <DecoratorSet decorators>
 918     inline static typename EnableIf<
 919       !HasDecorator<decorators, AS_RAW>::value>::type
 920     clone(oop src, oop dst, size_t size) {
 921       RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
 922     }
 923 
 924     template <DecoratorSet decorators>
 925     inline static typename EnableIf<
 926       HasDecorator<decorators, AS_RAW>::value>::type
 927     value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
 928       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 929       Raw::value_copy(src, dst, md, lk);
 930     }
 931 
 932     template <DecoratorSet decorators>
 933     inline static typename EnableIf<
 934       !HasDecorator<decorators, AS_RAW>::value>::type
 935     value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
 936       const DecoratorSet expanded_decorators = decorators;
 937       RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst, md, lk);
 938     }
 939   };
 940 
 941   // Step 2: Reduce types.
 942   // Enforce that for non-oop types, T and P have to be strictly the same.
 943   // P is the type of the address and T is the type of the values.
 944   // As for oop types, it is allowed to send T in {narrowOop, oop} and
 945   // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
 946   // the subsequent table. (columns are P, rows are T)
 947   // |           | HeapWord  |   oop   | narrowOop |
 948   // |   oop     |  rt-comp  | hw-none |  hw-comp  |
 949   // | narrowOop |     x     |    x    |  hw-none  |
 950   //
 951   // x means not allowed
 952   // rt-comp means it must be checked at runtime whether the oop is compressed.
 953   // hw-none means it is statically known the oop will not be compressed.
 954   // hw-comp means it is statically known the oop will be compressed.
 955 
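A worked reading of the table, added here for illustration only:

    // e.g. T = oop, P = narrowOop* -> hw-comp: the narrowOop* overloads below add both
    //                                 INTERNAL_CONVERT_COMPRESSED_OOP and INTERNAL_RT_USE_COMPRESSED_OOPS
    //      T = oop, P = HeapWord*  -> rt-comp: only INTERNAL_CONVERT_COMPRESSED_OOP is added, and the
    //                                 UseCompressedOops check is deferred to runtime
    //      T = jint, P = jint*     -> non-oop types pass through unchanged
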
 956   template <DecoratorSet decorators, typename T>
 957   inline void store_reduce_types(T* addr, T value) {
 958     PreRuntimeDispatch::store<decorators>(addr, value);

1033 
1034   template <DecoratorSet decorators, typename T>
1035   inline T load_reduce_types(T* addr) {
1036     return PreRuntimeDispatch::load<decorators, T>(addr);
1037   }
1038 
1039   template <DecoratorSet decorators, typename T>
1040   inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
1041     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1042                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1043     return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
1044   }
1045 
1046   template <DecoratorSet decorators, typename T>
1047   inline oop load_reduce_types(HeapWord* addr) {
1048     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1049     return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1050   }
1051 
1052   template <DecoratorSet decorators, typename T>
1053   inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
1054                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1055                                      size_t length) {
1056     PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
1057                                               dst_obj, dst_offset_in_bytes, dst_raw,
1058                                               length);
1059   }
1060 
1061   template <DecoratorSet decorators>
1062   inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1063                                      arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1064                                      size_t length) {
1065     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1066     PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1067                                                        dst_obj, dst_offset_in_bytes, dst_raw,
1068                                                        length);
1069   }
1070 
1071   template <DecoratorSet decorators>
1072   inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1073                                      arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1074                                      size_t length) {
1075     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1076                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1077     PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1078                                                        dst_obj, dst_offset_in_bytes, dst_raw,
1079                                                        length);
1080   }
1081 
1082   // Step 1: Set default decorators. This step remembers if a type was volatile
1083   // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1084   // memory ordering is set for the access, and the implied decorator rules
1085   // are applied to select sensible defaults for decorators that have not been
1086   // explicitly set. For example, default object referent strength is set to strong.
1087   // This step also decays the types passed in (e.g. getting rid of CV qualifiers
1088   // and references from the types). This step also performs some type verification
1089   // that the passed in types make sense.
1090 
1091   template <DecoratorSet decorators, typename T>
1092   static void verify_types(){
1093     // If this fails to compile, then you have sent in something that is
1094     // not recognized as a valid primitive type to a primitive Access function.
1095     STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
1096                    (std::is_pointer<T>::value || std::is_integral<T>::value) ||
1097                     std::is_floating_point<T>::value)); // not allowed primitive type
1098   }
1099 
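A hedged illustration of what this static check admits (the calls below are hypothetical, not code from the file):

    //   verify_types<IN_HEAP, jint>();           // integral type: accepted
    //   verify_types<IN_HEAP, double>();         // floating-point type: accepted
    //   verify_types<IN_HEAP, void*>();          // pointer type: accepted
    //   verify_types<IN_HEAP, SomeClassType>();  // any other class type trips the STATIC_ASSERT
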

1192     DecayedT new_decayed_value = new_value;
1193     // atomic_xchg is only available in SEQ_CST flavour.
1194     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1195     return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1196                                                          new_decayed_value);
1197   }
1198 
1199   template <DecoratorSet decorators, typename T>
1200   inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
1201     verify_types<decorators, T>();
1202     using DecayedT = std::decay_t<T>;
1203     DecayedT new_decayed_value = new_value;
1204     // atomic_xchg is only available in SEQ_CST flavour.
1205     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
1206                                              (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1207                                               INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
1208     return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
1209   }
1210 
1211   template <DecoratorSet decorators, typename T>
1212   inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1213                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1214                         size_t length) {
1215     STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1216                    (std::is_same<T, void>::value || std::is_integral<T>::value) ||
1217                     std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
1218     using DecayedT = std::decay_t<T>;
1219     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1220     arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1221                                                 dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1222                                                 length);
1223   }
1224 
1225   template <DecoratorSet decorators>
1226   inline void clone(oop src, oop dst, size_t size) {
1227     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1228     PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1229   }
1230 
1231   template <DecoratorSet decorators>
1232   inline void value_copy(void* src, void* dst, InlineKlass* md, LayoutKind lk) {
1233     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1234     PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst, md, lk);
1235   }
1236 
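A direct, illustrative call into the new value_copy entry point; src_addr, dst_addr, vk, and lk are placeholders standing for a source field address, a destination field address, the field's InlineKlass, and its LayoutKind:

    //   AccessInternal::value_copy<IN_HEAP>(src_addr, dst_addr, vk, lk);
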
1237   // Infer the type that should be returned from an Access::oop_load.
1238   template <typename P, DecoratorSet decorators>
1239   class OopLoadProxy: public StackObj {
1240   private:
1241     P *const _addr;
1242   public:
1243     explicit OopLoadProxy(P* addr) : _addr(addr) {}
1244 
1245     inline operator oop() {
1246       return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
1247     }
1248 
1249     inline operator narrowOop() {
1250       return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
1251     }
1252 
1253     template <typename T>
1254     inline bool operator ==(const T& other) const {
1255       return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
1256     }