1 /*
   2  * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
  26 #define SHARE_OOPS_ACCESSBACKEND_HPP
  27 
  28 #include "gc/shared/barrierSetConfig.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "metaprogramming/enableIf.hpp"
  31 #include "oops/accessDecorators.hpp"
  32 #include "oops/oopsHierarchy.hpp"
  33 #include "runtime/globals.hpp"
  34 #include "utilities/debug.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 
  37 #include <type_traits>
  38 
  39 // This metafunction returns either oop or narrowOop depending on whether
  40 // an access needs to use compressed oops or not.
  41 template <DecoratorSet decorators>
  42 struct HeapOopType: AllStatic {
  43   static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
  44                                          HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  45   using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
  46 };
  47 
  48 // This meta-function returns either oop or narrowOop depending on whether
  49 // a back-end needs to consider compressed oops types or not.
  50 template <DecoratorSet decorators>
  51 struct ValueOopType: AllStatic {
  52   static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  53   using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
  54 };
  55 
namespace AccessInternal {
  // One enumerator per runtime-dispatchable access primitive. Each selects a
  // function pointer type in AccessFunctionTypes and a RuntimeDispatch
  // specialization further down the file.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_VALUE_COPY
  };

  // True iff a value of type T (== oop) must be converted to/from the
  // compressed narrowOop heap representation around the raw memory access.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };

  // Address of the (possibly compressed) oop field at byte_offset from base.
  // The cast goes through void* to avoid oop's operator overloading.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // This metafunction returns whether it is possible for a type T to require
  // locking to support wide atomics or not.
  // With native 8-byte cmpxchg support nothing ever needs locking; otherwise
  // any type wider than 4 bytes may fall back to the lock-based path.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  struct PossiblyLockedAccess: public std::false_type {};
#else
  struct PossiblyLockedAccess: public std::integral_constant<bool, (sizeof(T) > 4)> {};
#endif

  // Function pointer signatures for every access primitive, for a given
  // decorator set and value type T. The *_at variants address a field inside
  // a heap object; the others take a raw address.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef void (*value_copy_func_t)(void* src, void* dst, InlineKlass* md);
  };

  // Untyped (void) accesses only support arraycopy.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                     size_t length);
  };

  // Maps a BarrierType to its function pointer type; one specialization per
  // enumerator is generated by the macro below.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolve the concrete accessor function for an access: resolve_barrier for
  // primitive accesses, resolve_oop_barrier for oop accesses. Definitions
  // live outside this header.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // RAII lock used for the lock-based fallback of wide atomic accesses
  // (see PossiblyLockedAccess above).
  class AccessLocker {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}
 179 
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
// (Only internal state, memory ordering, arraycopy hints and null-ness survive.)
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
 184 
 185 // The RawAccessBarrier performs raw accesses with additional knowledge of
 186 // memory ordering, so that OrderAccess/Atomic is called when necessary.
 187 // It additionally handles compressed oops, and hence is not completely "raw"
 188 // strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // encode/decode convert between the value type T and its in-heap
  // representation (narrowOop when compression applies, otherwise T itself).
  // Only encode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // No conversion needed: pass the value through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // No conversion needed: pass the value through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // load_internal/store_internal overload sets: exactly one overload is
  // enabled per memory-ordering decorator (MO_*). Only the MO_UNORDERED
  // variants are defined here; the ordered ones are defined out-of-line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  // Unordered load: a plain dereference.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  // Unordered store: a plain assignment.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  // Atomic read-modify-write primitives, again selected by MO_* decorator;
  // all defined out-of-line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(void* addr, T new_value);

  // The following *_locked mechanisms serve the purpose of handling atomic operations
  // that are larger than a machine can handle, and then possibly opt for using
  // a slower path using a mutex to perform the operation.

  // Narrow enough for native atomics: dispatch directly.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
  }

  // Possibly-locked variant, defined out-of-line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(void* addr, T new_value) {
    return atomic_xchg_internal<ds>(addr, new_value);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(void* addr, T new_value);

public:
  // Public raw access API. The class-level decorators select memory ordering
  // and compressed-oop behavior for all of these.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_maybe_locked<decorators>(addr, new_value);
  }

  template <typename T>
  static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop_* variants wrap the corresponding raw access with encode/decode of
  // compressed oops; defined out-of-line.
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  template <typename T>
  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);

  // *_at variants compute the field address from base + offset and forward
  // to the raw-address accessors above.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);
  static void value_copy(void* src, void* dst, InlineKlass* md);

};
 398 
namespace AccessInternal {
  // Debug-only check that the current thread may perform accesses; the macro
  // expands to nothing in product builds.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
 403 
 404 // Below is the implementation of the first 4 steps of the template pipeline:
 405 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
 406 //           and sets default decorators to sensible values.
 407 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
 408 //           multiple types. The P type of the address and T type of the value must
 409 //           match.
 410 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
 411 //           avoided, and in that case avoids it (calling raw accesses or
 412 //           primitive accesses in a build that does not require primitive GC barriers)
 413 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
 414 //           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
 415 //           to the access.
 416 
 417 namespace AccessInternal {
 418   template <typename T>
 419   struct OopOrNarrowOopInternal: AllStatic {
 420     typedef oop type;
 421   };
 422 
 423   template <>
 424   struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
 425     typedef narrowOop type;
 426   };
 427 
  // This metafunction returns a canonicalized oop/narrowOop type for the
  // oop-like types passed in from the oop_* overloads where the user has sworn
  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and random other things).
 432   // In the oop_* overloads, it must hold that if the passed in type T is not
 433   // narrowOop, then it by contract has to be one of many oop-like types implicitly
 434   // convertible to oop, and hence returns oop as the canonical oop type.
 435   // If it turns out it was not, then the implicit conversion to oop will fail
 436   // to compile, as desired.
 437   template <typename T>
 438   struct OopOrNarrowOop: AllStatic {
 439     typedef typename OopOrNarrowOopInternal<std::decay_t<T>>::type type;
 440   };
 441 
 442   inline void* field_addr(oop base, ptrdiff_t byte_offset) {
 443     return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
 444   }
 445   // Step 4: Runtime dispatch
 446   // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
 447   // accessor. This is required when the access either depends on whether compressed oops
 448   // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
 449   // barriers). The way it works is that a function pointer initially pointing to an
 450   // accessor resolution function gets called for each access. Upon first invocation,
 451   // it resolves which accessor to be used in future invocations and patches the
 452   // function pointer to this new accessor.
 453 
  // Primary template; only the per-BarrierType specializations below are used.
  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};
 456 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    // Initially points to store_init; patched to the resolved accessor on
    // first invocation.
    static func_t _store_func;

    // Resolves the accessor, patches _store_func, then performs the store.
    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      assert_access_thread_state();
      _store_func(addr, value);
    }
  };
 469 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    // Initially points to store_at_init; patched on first invocation.
    static func_t _store_at_func;

    // Resolves the accessor, patches _store_at_func, then performs the store.
    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      assert_access_thread_state();
      _store_at_func(base, offset, value);
    }
  };
 482 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    // Initially points to load_init; patched on first invocation.
    static func_t _load_func;

    // Resolves the accessor, patches _load_func, then performs the load.
    static T load_init(void* addr);

    static inline T load(void* addr) {
      assert_access_thread_state();
      return _load_func(addr);
    }
  };
 495 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    // Initially points to load_at_init; patched on first invocation.
    static func_t _load_at_func;

    // Resolves the accessor, patches _load_at_func, then performs the load.
    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      assert_access_thread_state();
      return _load_at_func(base, offset);
    }
  };
 508 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    // Initially points to atomic_cmpxchg_init; patched on first invocation.
    static func_t _atomic_cmpxchg_func;

    // Resolves the accessor, patches the pointer, then performs the cmpxchg.
    static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);

    static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_func(addr, compare_value, new_value);
    }
  };
 521 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    // Initially points to atomic_cmpxchg_at_init; patched on first invocation.
    static func_t _atomic_cmpxchg_at_func;

    // Resolves the accessor, patches the pointer, then performs the cmpxchg.
    static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);

    static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
    }
  };
 534 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    // Initially points to atomic_xchg_init; patched on first invocation.
    static func_t _atomic_xchg_func;

    // Resolves the accessor, patches the pointer, then performs the xchg.
    static T atomic_xchg_init(void* addr, T new_value);

    static inline T atomic_xchg(void* addr, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_func(addr, new_value);
    }
  };
 547 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    // Initially points to atomic_xchg_at_init; patched on first invocation.
    static func_t _atomic_xchg_at_func;

    // Resolves the accessor, patches the pointer, then performs the xchg.
    static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

    static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_at_func(base, offset, new_value);
    }
  };
 560 
 561   template <DecoratorSet decorators, typename T>
 562   struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
 563     typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
 564     static func_t _arraycopy_func;
 565 
 566     static void arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 567                                arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 568                                size_t length);
 569 
 570     static inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 571                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 572                                  size_t length) {
 573       assert_access_thread_state();
 574       return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
 575                              dst_obj, dst_offset_in_bytes, dst_raw,
 576                              length);
 577     }
 578   };
 579 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    // Initially points to clone_init; patched on first invocation.
    static func_t _clone_func;

    // Resolves the accessor, patches _clone_func, then performs the clone.
    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      assert_access_thread_state();
      _clone_func(src, dst, size);
    }
  };
 592 
 593   template <DecoratorSet decorators, typename T>
 594   struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
 595     typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
 596     static func_t _value_copy_func;
 597 
 598     static void value_copy_init(void* src, void* dst, InlineKlass* md);
 599 
 600     static inline void value_copy(void* src, void* dst, InlineKlass* md) {
 601       _value_copy_func(src, dst, md);
 602     }
 603   };
 604 
  // Initialize the function pointers to point to the resolving function.
  // Each static member starts out as the corresponding *_init resolver; the
  // resolver patches the pointer to the concrete accessor on first use.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
  RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;
 649 
 650   // Step 3: Pre-runtime dispatching.
 651   // The PreRuntimeDispatch class is responsible for filtering the barrier strength
 652   // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
 653   // dispatch point. Otherwise it goes through a runtime check if hardwiring was
 654   // not possible.
 655   struct PreRuntimeDispatch: AllStatic {
 656     template<DecoratorSet decorators>
 657     struct CanHardwireRaw: public std::integral_constant<
 658       bool,
 659       !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
 660       !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
 661       HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
 662     {};
 663 
 664     static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;
 665 
 666     template<DecoratorSet decorators>
 667     static bool is_hardwired_primitive() {
 668       return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
 669     }
 670 
 671     template <DecoratorSet decorators, typename T>
 672     inline static typename EnableIf<
 673       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
 674     store(void* addr, T value) {
 675       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 676       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 677         Raw::oop_store(addr, value);
 678       } else {
 679         Raw::store(addr, value);
 680       }
 681     }
 682 
 683     template <DecoratorSet decorators, typename T>
 684     inline static typename EnableIf<
 685       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
 686     store(void* addr, T value) {
 687       if (UseCompressedOops) {
 688         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 689         PreRuntimeDispatch::store<expanded_decorators>(addr, value);
 690       } else {
 691         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 692         PreRuntimeDispatch::store<expanded_decorators>(addr, value);
 693       }
 694     }
 695 
 696     template <DecoratorSet decorators, typename T>
 697     inline static typename EnableIf<
 698       !HasDecorator<decorators, AS_RAW>::value>::type
 699     store(void* addr, T value) {
 700       if (is_hardwired_primitive<decorators>()) {
 701         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 702         PreRuntimeDispatch::store<expanded_decorators>(addr, value);
 703       } else {
 704         RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
 705       }
 706     }
 707 
    // AS_RAW store_at: compute the field address eagerly and forward to the
    // address-based store.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }
 714 
 715     template <DecoratorSet decorators, typename T>
 716     inline static typename EnableIf<
 717       !HasDecorator<decorators, AS_RAW>::value>::type
 718     store_at(oop base, ptrdiff_t offset, T value) {
 719       if (is_hardwired_primitive<decorators>()) {
 720         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 721         PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
 722       } else {
 723         RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
 724       }
 725     }
 726 
 727     template <DecoratorSet decorators, typename T>
 728     inline static typename EnableIf<
 729       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
 730     load(void* addr) {
 731       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 732       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 733         return Raw::template oop_load<T>(addr);
 734       } else {
 735         return Raw::template load<T>(addr);
 736       }
 737     }
 738 
 739     template <DecoratorSet decorators, typename T>
 740     inline static typename EnableIf<
 741       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
 742     load(void* addr) {
 743       if (UseCompressedOops) {
 744         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 745         return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
 746       } else {
 747         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 748         return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
 749       }
 750     }
 751 
 752     template <DecoratorSet decorators, typename T>
 753     inline static typename EnableIf<
 754       !HasDecorator<decorators, AS_RAW>::value, T>::type
 755     load(void* addr) {
 756       if (is_hardwired_primitive<decorators>()) {
 757         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 758         return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
 759       } else {
 760         return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
 761       }
 762     }
 763 
    // AS_RAW load_at: compute the field address eagerly and forward to the
    // address-based load.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }
 770 
 771     template <DecoratorSet decorators, typename T>
 772     inline static typename EnableIf<
 773       !HasDecorator<decorators, AS_RAW>::value, T>::type
 774     load_at(oop base, ptrdiff_t offset) {
 775       if (is_hardwired_primitive<decorators>()) {
 776         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 777         return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
 778       } else {
 779         return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
 780       }
 781     }
 782 
 783     template <DecoratorSet decorators, typename T>
 784     inline static typename EnableIf<
 785       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
 786     atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 787       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 788       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 789         return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
 790       } else {
 791         return Raw::atomic_cmpxchg(addr, compare_value, new_value);
 792       }
 793     }
 794 
 795     template <DecoratorSet decorators, typename T>
 796     inline static typename EnableIf<
 797       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
 798     atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 799       if (UseCompressedOops) {
 800         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 801         return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
 802       } else {
 803         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 804         return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
 805       }
 806     }
 807 
 808     template <DecoratorSet decorators, typename T>
 809     inline static typename EnableIf<
 810       !HasDecorator<decorators, AS_RAW>::value, T>::type
 811     atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 812       if (is_hardwired_primitive<decorators>()) {
 813         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 814         return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
 815       } else {
 816         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
 817       }
 818     }
 819 
    // AS_RAW cmpxchg_at: compute the field address eagerly and forward to
    // the address-based atomic_cmpxchg.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
    }
 826 
 827     template <DecoratorSet decorators, typename T>
 828     inline static typename EnableIf<
 829       !HasDecorator<decorators, AS_RAW>::value, T>::type
 830     atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 831       if (is_hardwired_primitive<decorators>()) {
 832         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 833         return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
 834       } else {
 835         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
 836       }
 837     }
 838 
 839     template <DecoratorSet decorators, typename T>
 840     inline static typename EnableIf<
 841       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
 842     atomic_xchg(void* addr, T new_value) {
 843       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 844       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 845         return Raw::oop_atomic_xchg(addr, new_value);
 846       } else {
 847         return Raw::atomic_xchg(addr, new_value);
 848       }
 849     }
 850 
 851     template <DecoratorSet decorators, typename T>
 852     inline static typename EnableIf<
 853       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
 854     atomic_xchg(void* addr, T new_value) {
 855       if (UseCompressedOops) {
 856         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 857         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
 858       } else {
 859         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 860         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
 861       }
 862     }
 863 
 864     template <DecoratorSet decorators, typename T>
 865     inline static typename EnableIf<
 866       !HasDecorator<decorators, AS_RAW>::value, T>::type
 867     atomic_xchg(void* addr, T new_value) {
 868       if (is_hardwired_primitive<decorators>()) {
 869         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 870         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
 871       } else {
 872         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
 873       }
 874     }
 875 
    // AS_RAW xchg_at: compute the field address eagerly and forward to the
    // address-based atomic_xchg.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      return atomic_xchg<decorators>(field_addr(base, offset), new_value);
    }
 882 
 883     template <DecoratorSet decorators, typename T>
 884     inline static typename EnableIf<
 885       !HasDecorator<decorators, AS_RAW>::value, T>::type
 886     atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 887       if (is_hardwired_primitive<decorators>()) {
 888         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 889         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
 890       } else {
 891         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
 892       }
 893     }
 894 
 895     template <DecoratorSet decorators, typename T>
 896     inline static typename EnableIf<
 897       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, void>::type
 898     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 899               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 900               size_t length) {
 901       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 902       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 903         Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
 904                            dst_obj, dst_offset_in_bytes, dst_raw,
 905                            length);
 906       } else {
 907         Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 908                        dst_obj, dst_offset_in_bytes, dst_raw,
 909                        length);
 910       }
 911     }
 912 
 913     template <DecoratorSet decorators, typename T>
 914     inline static typename EnableIf<
 915       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, void>::type
 916     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 917               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 918               size_t length) {
 919       if (UseCompressedOops) {
 920         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 921         PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 922                                                            dst_obj, dst_offset_in_bytes, dst_raw,
 923                                                            length);
 924       } else {
 925         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 926         PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 927                                                            dst_obj, dst_offset_in_bytes, dst_raw,
 928                                                            length);
 929       }
 930     }
 931 
 932     template <DecoratorSet decorators, typename T>
 933     inline static typename EnableIf<
 934       !HasDecorator<decorators, AS_RAW>::value, void>::type
 935     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 936               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 937               size_t length) {
 938       if (is_hardwired_primitive<decorators>()) {
 939         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 940         PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
 941                                                            dst_obj, dst_offset_in_bytes, dst_raw,
 942                                                            length);
 943       } else {
 944         RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 945                                                                      dst_obj, dst_offset_in_bytes, dst_raw,
 946                                                                      length);
 947       }
 948     }
 949 
 950     template <DecoratorSet decorators>
 951     inline static typename EnableIf<
 952       HasDecorator<decorators, AS_RAW>::value>::type
 953     clone(oop src, oop dst, size_t size) {
 954       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 955       Raw::clone(src, dst, size);
 956     }
 957 
    // Non-raw clone: route through the runtime dispatch so the GC can
    // apply barriers to the copied object's oop fields.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
    }
 964 
 965     template <DecoratorSet decorators>
 966     inline static typename EnableIf<
 967       HasDecorator<decorators, AS_RAW>::value>::type
 968     value_copy(void* src, void* dst, InlineKlass* md) {
 969       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 970       Raw::value_copy(src, dst, md);
 971     }
 972 
 973     template <DecoratorSet decorators>
 974     inline static typename EnableIf<
 975       !HasDecorator<decorators, AS_RAW>::value>::type
 976       value_copy(void* src, void* dst, InlineKlass* md) {
 977       const DecoratorSet expanded_decorators = decorators;
 978       RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst, md);
 979     }
 980   };
 981 
 982   // Step 2: Reduce types.
 983   // Enforce that for non-oop types, T and P have to be strictly the same.
 984   // P is the type of the address and T is the type of the values.
  // As for oop types, it is allowed to send T in {narrowOop, oop} and
 986   // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
 987   // the subsequent table. (columns are P, rows are T)
 988   // |           | HeapWord  |   oop   | narrowOop |
 989   // |   oop     |  rt-comp  | hw-none |  hw-comp  |
 990   // | narrowOop |     x     |    x    |  hw-none  |
 991   //
 992   // x means not allowed
 993   // rt-comp means it must be checked at runtime whether the oop is compressed.
 994   // hw-none means it is statically known the oop will not be compressed.
 995   // hw-comp means it is statically known the oop will be compressed.
 996 
  // Generic case: T and P are the same type, so no compressed-oop
  // conversion is involved; forward unchanged.
  template <DecoratorSet decorators, typename T>
  inline void store_reduce_types(T* addr, T value) {
    PreRuntimeDispatch::store<decorators>(addr, value);
  }
1001 
1002   template <DecoratorSet decorators>
1003   inline void store_reduce_types(narrowOop* addr, oop value) {
1004     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1005                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1006     PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1007   }
1008 
1009   template <DecoratorSet decorators>
1010   inline void store_reduce_types(narrowOop* addr, narrowOop value) {
1011     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1012                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1013     PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1014   }
1015 
1016   template <DecoratorSet decorators>
1017   inline void store_reduce_types(HeapWord* addr, oop value) {
1018     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1019     PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1020   }
1021 
  // Generic case: T and P are the same type, so no compressed-oop
  // conversion is involved; forward unchanged.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
  }
1026 
1027   template <DecoratorSet decorators>
1028   inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
1029     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1030                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1031     return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1032   }
1033 
1034   template <DecoratorSet decorators>
1035   inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
1036     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1037                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1038     return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1039   }
1040 
1041   template <DecoratorSet decorators>
1042   inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
1043                                          oop compare_value,
1044                                          oop new_value) {
1045     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1046     return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1047   }
1048 
1049   template <DecoratorSet decorators, typename T>
1050   inline T atomic_xchg_reduce_types(T* addr, T new_value) {
1051     const DecoratorSet expanded_decorators = decorators;
1052     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1053   }
1054 
1055   template <DecoratorSet decorators>
1056   inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
1057     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1058                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1059     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1060   }
1061 
1062   template <DecoratorSet decorators>
1063   inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
1064     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1065                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1066     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1067   }
1068 
1069   template <DecoratorSet decorators>
1070   inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
1071     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1072     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1073   }
1074 
  // Generic case: T and P are the same type, so no compressed-oop
  // conversion is involved; forward unchanged.
  template <DecoratorSet decorators, typename T>
  inline T load_reduce_types(T* addr) {
    return PreRuntimeDispatch::load<decorators, T>(addr);
  }
1079 
1080   template <DecoratorSet decorators, typename T>
1081   inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
1082     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1083                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1084     return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
1085   }
1086 
1087   template <DecoratorSet decorators, typename T>
1088   inline oop load_reduce_types(HeapWord* addr) {
1089     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1090     return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1091   }
1092 
  // Generic case: element type carries no compressed-oop ambiguity;
  // forward unchanged.
  template <DecoratorSet decorators, typename T>
  inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length) {
    PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                              length);
  }
1101 
1102   template <DecoratorSet decorators>
1103   inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1104                                      arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1105                                      size_t length) {
1106     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1107     PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1108                                                        dst_obj, dst_offset_in_bytes, dst_raw,
1109                                                        length);
1110   }
1111 
1112   template <DecoratorSet decorators>
1113   inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1114                                      arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1115                                      size_t length) {
1116     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1117                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1118     PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1119                                                        dst_obj, dst_offset_in_bytes, dst_raw,
1120                                                        length);
1121   }
1122 
1123   // Step 1: Set default decorators. This step remembers if a type was volatile
1124   // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1125   // memory ordering is set for the access, and the implied decorator rules
1126   // are applied to select sensible defaults for decorators that have not been
1127   // explicitly set. For example, default object referent strength is set to strong.
1128   // This step also decays the types passed in (e.g. getting rid of CV qualifiers
  // and references from the types). This step also performs some type verification
1130   // that the passed in types make sense.
1131 
  // Compile-time check that T is a supported value type for a primitive
  // access: pointer, integral, or floating point. Oop accesses carry
  // INTERNAL_VALUE_IS_OOP and have been validated earlier, so they pass.
  template <DecoratorSet decorators, typename T>
  static void verify_types(){
    // If this fails to compile, then you have sent in something that is
    // not recognized as a valid primitive type to a primitive Access function.
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                   (std::is_pointer<T>::value || std::is_integral<T>::value) ||
                    std::is_floating_point<T>::value)); // not allowed primitive type
  }
1140 
  // Step 1 entry point for address-based stores: verifies the value type,
  // decays P/T (dropping cv-qualifiers and references), fixes up decorator
  // defaults, and forwards to the type-reduction step.
  template <DecoratorSet decorators, typename P, typename T>
  inline void store(P* addr, T value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    DecayedT decayed_value = value;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_RELAXED by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_RELAXED | decorators) : decorators>::value;
    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
  }
1154 
  // Step 1 entry point for (base, offset) stores: verifies the value type,
  // decays T, and records that oop stores may need compressed-oop
  // conversion before dispatching.
  template <DecoratorSet decorators, typename T>
  inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT decayed_value = value;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
  }
1165 
  // Step 1 entry point for address-based loads: verifies the value type,
  // decays P/T (oop loads reduce T to oop or narrowOop via OopOrNarrowOop),
  // fixes up decorator defaults, and forwards to the type-reduction step.
  template <DecoratorSet decorators, typename P, typename T>
  inline T load(P* addr) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                        typename OopOrNarrowOop<T>::type,
                                        std::decay_t<T>>;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_RELAXED by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_RELAXED | decorators) : decorators>::value;
    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
  }
1180 
  // Step 1 entry point for (base, offset) loads: verifies the value type,
  // decays T, and records that oop loads may need compressed-oop
  // conversion before dispatching.
  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                        typename OopOrNarrowOop<T>::type,
                                        std::decay_t<T>>;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }
1194 
  // Step 1 entry point for address-based cmpxchg: verifies the value type,
  // decays P/T, defaults the memory ordering to MO_SEQ_CST if none was
  // specified, and forwards to the type-reduction step.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                            compare_decayed_value,
                                                            new_decayed_value);
  }
1209 
  // Step 1 entry point for (base, offset) cmpxchg: verifies the value type,
  // decays T, defaults the ordering to MO_SEQ_CST, and marks oop accesses
  // as potentially needing compressed-oop conversion.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
                                                                   new_decayed_value);
  }
1227 
  // Step 1 entry point for address-based xchg: verifies the value type,
  // decays P/T, forces MO_SEQ_CST ordering, and forwards to the
  // type-reduction step.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(P* addr, T new_value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                         new_decayed_value);
  }
1239 
  // Step 1 entry point for (base, offset) xchg: verifies the value type,
  // decays T, forces MO_SEQ_CST ordering, and marks oop accesses as
  // potentially needing compressed-oop conversion.
  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
  }
1251 
  // Step 1 entry point for arraycopy: verifies the element type (void is
  // allowed for type-erased copies), tags the access as an in-heap array
  // access, and forwards to the type-reduction step.
  template <DecoratorSet decorators, typename T>
  inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                    std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
    using DecayedT = std::decay_t<T>;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
    arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                length);
  }
1265 
  // Step 1 entry point for clone: fixes up decorator defaults and
  // dispatches.
  template <DecoratorSet decorators>
  inline void clone(oop src, oop dst, size_t size) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
  }
1271 
  // Step 1 entry point for flat value copies: fixes up decorator defaults
  // and dispatches.
  template <DecoratorSet decorators>
  inline void value_copy(void* src, void* dst, InlineKlass* md) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst, md);
  }
1277 
  // Infer the type that should be returned from an Access::oop_load.
  // The proxy defers the actual load until the use site forces a
  // conversion (to oop or narrowOop) or a comparison, so the load can be
  // performed with the result type the caller actually wants.
  template <typename P, DecoratorSet decorators>
  class OopLoadProxy: public StackObj {
  private:
    P *const _addr; // address to load from, captured at construction
  public:
    explicit OopLoadProxy(P* addr) : _addr(addr) {}

    // Conversion to oop: perform the load with oop as the result type.
    inline operator oop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
    }

    // Conversion to narrowOop: perform the load with narrowOop result type.
    inline operator narrowOop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
    }

    // Comparison against a value of type T: load as T, then compare.
    template <typename T>
    inline bool operator ==(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
    }

    // nullptr comparisons load as oop.
    inline bool operator ==(std::nullptr_t) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) == nullptr;
    }

    inline bool operator !=(std::nullptr_t) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) != nullptr;
    }
  };
1312 
  // Infer the type that should be returned from an Access::load_at.
  // The load is deferred until conversion, so the destination type T of the
  // surrounding expression picks the load_at instantiation.
  template <DecoratorSet decorators>
  class LoadAtProxy: public StackObj {
  private:
    const oop _base;         // object holding the field
    const ptrdiff_t _offset; // byte offset of the field within _base
  public:
    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    // Implicit conversion to any T triggers the actual load.
    template <typename T>
    inline operator T() const {
      return load_at<decorators, T>(_base, _offset);
    }

    // Comparisons load with the comparand's type and compare the result.
    template <typename T>
    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

    template <typename T>
    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
  };
1333 
  // Infer the type that should be returned from an Access::oop_load_at.
  // Like OopLoadProxy, but addressed by (base, offset) instead of a raw
  // pointer; the requested result type (oop vs narrowOop) selects the
  // INTERNAL_VALUE_IS_OOP load_at instantiation.
  template <DecoratorSet decorators>
  class OopLoadAtProxy: public StackObj {
  private:
    const oop _base;         // object holding the field
    const ptrdiff_t _offset; // byte offset of the field within _base
  public:
    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    // Contextual conversion to oop: perform the load with an oop result.
    inline operator oop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
    }

    // Contextual conversion to narrowOop: perform the load compressed.
    inline operator narrowOop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
    }

    // Comparisons load with the comparand's type and compare the result.
    template <typename T>
    inline bool operator ==(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
    }
  };
1361 }
1362 
1363 #endif // SHARE_OOPS_ACCESSBACKEND_HPP