1 /*
   2  * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
  26 #define SHARE_OOPS_ACCESSBACKEND_HPP
  27 
  28 #include "gc/shared/barrierSetConfig.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "metaprogramming/enableIf.hpp"
  31 #include "oops/accessDecorators.hpp"
  32 #include "oops/oopsHierarchy.hpp"
  33 #include "runtime/globals.hpp"
  34 #include "utilities/debug.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 
  37 #include <type_traits>
  38 
  39 // This metafunction returns either oop or narrowOop depending on whether
  40 // an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compression is needed iff the access may require oop <-> narrowOop
  // conversion (INTERNAL_CONVERT_COMPRESSED_OOP) AND the runtime choice has
  // been resolved to compressed oops (INTERNAL_RT_USE_COMPRESSED_OOPS).
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  // narrowOop for compressed heap accesses, otherwise plain oop.
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
  47 
  48 namespace AccessInternal {
  // The kinds of runtime-dispatched accesses. The *_AT variants address a
  // field as (base oop + byte offset); the others take a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE
  };
  61 
  // True iff the access must convert between oop and narrowOop: the value is
  // an oop, the heap representation for this access is narrowOop, and the
  // passed-in value type is the uncompressed oop.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};
  67 
  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T. That is, the type a value of type T
  // has when stored in the heap under the given decorators (possibly
  // compressed for oop values; unchanged for primitives).
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };
  76 
  77   template <DecoratorSet decorators>
  78   inline typename HeapOopType<decorators>::type*
  79   oop_field_addr(oop base, ptrdiff_t byte_offset) {
  80     return reinterpret_cast<typename HeapOopType<decorators>::type*>(
  81              reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  82   }
  83 
  // Function pointer signatures for each runtime-dispatched access, for a
  // given decorator set and value type T.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    // Accesses addressed as (base oop + byte offset).
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    // Accesses addressed by a raw address.
    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
  };

  // For T = void only the untyped arraycopy signature is available.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                     size_t length);
  };
 108 
  // Maps (decorators, T, barrier type) to the matching function pointer type
  // from AccessFunctionTypes. One specialization per BarrierType is generated
  // by the macro below.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION
 127 
  // Resolve the accessor function pointer for a primitive access
  // (declaration only in this header).
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  // Resolve the accessor function pointer for an oop access
  // (declaration only in this header).
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // Untyped field address computation; defined inline further down this file.
  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
 151 }
 152 
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed. Only internal,
// memory-ordering and arraycopy decorators plus IS_NOT_NULL are kept.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
 157 
 158 // The RawAccessBarrier performs raw accesses with additional knowledge of
 159 // memory ordering, so that OrderAccess/Atomic is called when necessary.
 160 // It additionally handles compressed oops, and hence is not completely "raw"
 161 // strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  // oop -> narrowOop compression; declaration only (defined out of line).
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // No conversion required: identity.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  // Encode a value for storing into the heap, compressing oops if required
  // by the decorators.
  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  // narrowOop -> oop decompression; declaration only (defined out of line).
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // No conversion required: identity.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  // Decode a value loaded from the heap, decompressing oops if required
  // by the decorators.
  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // load_internal is overloaded on the memory-ordering decorator. Only the
  // MO_UNORDERED version is defined in this header; the ordered versions are
  // declarations only.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  // Unordered load: plain dereference.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  // store_internal mirrors load_internal: overloaded per memory ordering,
  // with only the MO_UNORDERED version defined here.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  // Unordered store: plain assignment.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  // Atomic compare-and-exchange; only MO_SEQ_CST and MO_RELAXED orderings
  // are supported (declarations only).
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  // Atomic exchange; only MO_SEQ_CST ordering is supported (declaration only).
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(void* addr, T new_value);

public:
  // Public primitive accessors; each forwards to the *_internal overload
  // selected by the memory-ordering decorator.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_internal<decorators>(addr, new_value);
  }

  template <typename T>
  static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop flavors of the accessors; these perform the oop encode/decode around
  // the raw access (declarations only in this header).
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  template <typename T>
  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);

  // The *_at variants compute the field address from (base, offset) and
  // forward to the raw-address accessors above.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);
};
 341 
namespace AccessInternal {
  // Debug-only check of the calling thread's state before an access;
  // both the declaration and the macro compile away in product builds.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
 346 
 347 // Below is the implementation of the first 4 steps of the template pipeline:
 348 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
 349 //           and sets default decorators to sensible values.
 350 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
 351 //           multiple types. The P type of the address and T type of the value must
 352 //           match.
 353 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
 354 //           avoided, and in that case avoids it (calling raw accesses or
 355 //           primitive accesses in a build that does not require primitive GC barriers)
 356 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
 357 //           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
 358 //           to the access.
 359 
 360 namespace AccessInternal {
  // Helper for OopOrNarrowOop below: any type maps to oop by default...
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  // ...except narrowOop, which stays narrowOop.
  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };
 370 
  // This metafunction returns a canonicalized oop/narrowOop type for an
  // oop-like type passed in from the oop_* overloads, where the user has sworn
  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and random other things).
  // In the oop_* overloads, it must hold that if the passed in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence returns oop as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
 380   template <typename T>
 381   struct OopOrNarrowOop: AllStatic {
 382     typedef typename OopOrNarrowOopInternal<std::decay_t<T>>::type type;
 383   };
 384 
 385   inline void* field_addr(oop base, ptrdiff_t byte_offset) {
 386     return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
 387   }
 388   // Step 4: Runtime dispatch
 389   // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
 390   // accessor. This is required when the access either depends on whether compressed oops
 391   // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
 392   // barriers). The way it works is that a function pointer initially pointing to an
 393   // accessor resolution function gets called for each access. Upon first invocation,
 394   // it resolves which accessor to be used in future invocations and patches the
 395   // function pointer to this new accessor.
 396 
  // Primary template; only the per-barrier specializations below are used.
  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    // Initially points to store_init; patched to the resolved accessor on
    // first invocation (see comment above).
    static func_t _store_func;

    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      assert_access_thread_state();
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    // Initially points to store_at_init; patched on first invocation.
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      assert_access_thread_state();
      _store_at_func(base, offset, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    // Initially points to load_init; patched on first invocation.
    static func_t _load_func;

    static T load_init(void* addr);

    static inline T load(void* addr) {
      assert_access_thread_state();
      return _load_func(addr);
    }
  };
 438 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    // Initially points to load_at_init; patched on first invocation.
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      assert_access_thread_state();
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    // Initially points to atomic_cmpxchg_init; patched on first invocation.
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);

    static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_func(addr, compare_value, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    // Initially points to atomic_cmpxchg_at_init; patched on first invocation.
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);

    static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    // Initially points to atomic_xchg_init; patched on first invocation.
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(void* addr, T new_value);

    static inline T atomic_xchg(void* addr, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_func(addr, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    // Initially points to atomic_xchg_at_init; patched on first invocation.
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

    static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_at_func(base, offset, new_value);
    }
  };
 503 
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    // Initially points to arraycopy_init; patched on first invocation.
    static func_t _arraycopy_func;

    static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length);

    static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                 size_t length) {
      assert_access_thread_state();
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    // Initially points to clone_init; patched on first invocation.
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      assert_access_thread_state();
      _clone_func(src, dst, size);
    }
  };
 535 
  // Initialize the function pointers to point to the resolving function.
  // The first access through each pointer therefore resolves the concrete
  // accessor and patches the pointer for all subsequent accesses.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;
 576 
 577   // Step 3: Pre-runtime dispatching.
 578   // The PreRuntimeDispatch class is responsible for filtering the barrier strength
 579   // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
 580   // dispatch point. Otherwise it goes through a runtime check if hardwiring was
 581   // not possible.
 582   struct PreRuntimeDispatch: AllStatic {
    // A raw access can be hardwired (no runtime branch on UseCompressedOops)
    // unless it is an oop access that may need compressed-oop conversion and
    // the decorators do not yet record the runtime compressed-oops choice.
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public std::integral_constant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    // The decorators that encode the runtime compressed-oops decision.
    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    // Non-oop (primitive) accesses can always be hardwired to the raw layer.
    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }
 597 
    // store: AS_RAW and hardwirable -> call the raw backend directly,
    // selecting the oop or primitive flavor at compile time.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        Raw::oop_store(addr, value);
      } else {
        Raw::store(addr, value);
      }
    }

    // store: AS_RAW but the compressed-oops choice is unknown at compile
    // time -> test UseCompressedOops once and recurse with the decision
    // encoded in the decorators, so the recursive call hardwires.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      }
    }

    // store: not AS_RAW -> hardwire primitive accesses as raw, otherwise go
    // through the runtime dispatch (GC barriers may be required).
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store(void* addr, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }

    // store_at: AS_RAW -> compute the field address and defer to store.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    // store_at: not AS_RAW -> same policy as store above.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }
 653 
    // load: AS_RAW and hardwirable -> call the raw backend directly,
    // selecting the oop or primitive flavor at compile time.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    // load: AS_RAW but the compressed-oops choice is unknown at compile
    // time -> test UseCompressedOops once and recurse with the decision
    // encoded in the decorators, so the recursive call hardwires.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    // load: not AS_RAW -> hardwire primitive accesses as raw, otherwise go
    // through the runtime dispatch (GC barriers may be required).
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }

    // load_at: AS_RAW -> compute the field address and defer to load.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    // load_at: not AS_RAW -> same policy as load above.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }
 709 
    // AS_RAW compare-exchange where the raw access can be hardwired:
    // dispatch to the raw barrier (oop-specific variant for oop values)
    // with non-raw decorators masked off.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
      } else {
        return Raw::atomic_cmpxchg(addr, compare_value, new_value);
      }
    }
 721 
    // AS_RAW compare-exchange that cannot be hardwired statically: resolve
    // UseCompressedOops at runtime and re-dispatch with the conversion
    // decorators added or removed accordingly.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      }
    }
 734 
    // Non-AS_RAW compare-exchange: hardwired primitive accesses re-enter
    // with AS_RAW added; everything else goes through the
    // runtime-dispatched BARRIER_ATOMIC_CMPXCHG barrier.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
      }
    }
 746 
    // AS_RAW indexed compare-exchange: translate (base, offset) into a raw
    // field address and fall through to the address-based overloads.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
    }
 753 
    // Non-AS_RAW indexed compare-exchange: hardwired primitive accesses
    // re-enter atomic_cmpxchg_at with AS_RAW added; everything else goes
    // through the runtime-dispatched BARRIER_ATOMIC_CMPXCHG_AT barrier.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
      }
    }
 765 
    // AS_RAW exchange where the raw access can be hardwired: dispatch to
    // the raw barrier (oop-specific variant for oop values) with non-raw
    // decorators masked off.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_xchg(addr, new_value);
      } else {
        return Raw::atomic_xchg(addr, new_value);
      }
    }
 777 
    // AS_RAW exchange that cannot be hardwired statically: resolve
    // UseCompressedOops at runtime and re-dispatch with the conversion
    // decorators added or removed accordingly.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      }
    }
 790 
    // Non-AS_RAW exchange: hardwired primitive accesses re-enter with
    // AS_RAW added; everything else goes through the runtime-dispatched
    // BARRIER_ATOMIC_XCHG barrier.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg(void* addr, T new_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
      }
    }
 802 
    // AS_RAW indexed exchange: translate (base, offset) into a raw field
    // address and fall through to the address-based overloads.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      return atomic_xchg<decorators>(field_addr(base, offset), new_value);
    }
 809 
 810     template <DecoratorSet decorators, typename T>
 811     inline static typename EnableIf<
 812       !HasDecorator<decorators, AS_RAW>::value, T>::type
 813     atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 814       if (is_hardwired_primitive<decorators>()) {
 815         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 816         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
 817       } else {
 818         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
 819       }
 820     }
 821 
    // AS_RAW arraycopy where the raw access can be hardwired: dispatch to
    // the raw barrier (oop-specific variant for oop values) with non-raw
    // decorators masked off. Returns the raw barrier's success flag.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                  length);
      } else {
        return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                              dst_obj, dst_offset_in_bytes, dst_raw,
                              length);
      }
    }
 839 
    // AS_RAW arraycopy that cannot be hardwired statically: resolve
    // UseCompressedOops at runtime and re-dispatch with the conversion
    // decorators added or removed accordingly.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                                                  length);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                                                  length);
      }
    }
 858 
    // Non-AS_RAW arraycopy: hardwired primitive accesses re-enter with
    // AS_RAW added; everything else goes through the runtime-dispatched
    // BARRIER_ARRAYCOPY barrier.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, bool>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                                                  length);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                            dst_obj, dst_offset_in_bytes, dst_raw,
                                                                            length);
      }
    }
 876 
    // AS_RAW clone: dispatch directly to the raw barrier with non-raw
    // decorators masked off.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      Raw::clone(src, dst, size);
    }
 884 
    // Non-AS_RAW clone: always goes through the runtime-dispatched
    // BARRIER_CLONE barrier (clone has no hardwired-primitive fast path).
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
    }
 891   };
 892 
 893   // Step 2: Reduce types.
 894   // Enforce that for non-oop types, T and P have to be strictly the same.
 895   // P is the type of the address and T is the type of the values.
  // As for oop types, it is allowed to send T in {narrowOop, oop} and
 897   // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
 898   // the subsequent table. (columns are P, rows are T)
 899   // |           | HeapWord  |   oop   | narrowOop |
 900   // |   oop     |  rt-comp  | hw-none |  hw-comp  |
 901   // | narrowOop |     x     |    x    |  hw-none  |
 902   //
 903   // x means not allowed
 904   // rt-comp means it must be checked at runtime whether the oop is compressed.
 905   // hw-none means it is statically known the oop will not be compressed.
 906   // hw-comp means it is statically known the oop will be compressed.
 907 
  // Generic store reduction: address and value types already match
  // (the non-oop / hw-none case), so forward unchanged.
  template <DecoratorSet decorators, typename T>
  inline void store_reduce_types(T* addr, T value) {
    PreRuntimeDispatch::store<decorators>(addr, value);
  }
 912 
  // oop value into a narrowOop slot (hw-comp in the table above): it is
  // statically known that compressed-oop conversion is required.
  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }
 919 
  // narrowOop value into a narrowOop slot (hw-none for narrowOop): still
  // tagged with the compressed-oop decorators so dispatch treats the slot
  // as a compressed heap reference.
  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }
 926 
  // oop value into an untyped HeapWord* slot (rt-comp): whether the slot
  // holds a compressed oop must be decided at runtime, so only the
  // conversion decorator is added, not the RT_USE_COMPRESSED_OOPS one.
  template <DecoratorSet decorators>
  inline void store_reduce_types(HeapWord* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }
 932 
  // Generic compare-exchange reduction: address and value types already
  // match, so forward unchanged.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
  }
 937 
  // oop values against a narrowOop slot (hw-comp): statically known to
  // need compressed-oop conversion.
  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  }
 944 
  // narrowOop values against a narrowOop slot: tagged with the
  // compressed-oop decorators so dispatch treats the slot as a compressed
  // heap reference.
  template <DecoratorSet decorators>
  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  }
 951 
  // oop values against an untyped HeapWord* slot (rt-comp): compression is
  // resolved at runtime, so only the conversion decorator is added.
  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
                                         oop compare_value,
                                         oop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  }
 959 
 960   template <DecoratorSet decorators, typename T>
 961   inline T atomic_xchg_reduce_types(T* addr, T new_value) {
 962     const DecoratorSet expanded_decorators = decorators;
 963     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
 964   }
 965 
  // oop value into a narrowOop slot (hw-comp): statically known to need
  // compressed-oop conversion.
  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  }
 972 
  // narrowOop value into a narrowOop slot: tagged with the compressed-oop
  // decorators so dispatch treats the slot as a compressed heap reference.
  template <DecoratorSet decorators>
  inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  }
 979 
  // oop value into an untyped HeapWord* slot (rt-comp): compression is
  // resolved at runtime, so only the conversion decorator is added.
  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  }
 985 
  // Generic load reduction: address and value types already match, so
  // forward unchanged.
  template <DecoratorSet decorators, typename T>
  inline T load_reduce_types(T* addr) {
    return PreRuntimeDispatch::load<decorators, T>(addr);
  }
 990 
  // Load from a narrowOop slot: statically known to be compressed. The
  // result type is mapped through OopOrNarrowOop<T> so the caller may
  // request either a decoded oop or the raw narrowOop.
  template <DecoratorSet decorators, typename T>
  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
  }
 997 
  // Load from an untyped HeapWord* slot (rt-comp): compression is resolved
  // at runtime; result is always delivered as a (decoded) oop.
  template <DecoratorSet decorators, typename T>
  inline oop load_reduce_types(HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
  }
1003 
  // Generic arraycopy reduction: element types already match, so forward
  // unchanged.
  template <DecoratorSet decorators, typename T>
  inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length) {
    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                     dst_obj, dst_offset_in_bytes, dst_raw,
                                                     length);
  }
1012 
  // Arraycopy over untyped HeapWord* buffers (rt-comp): whether the
  // elements are compressed oops is resolved at runtime, so only the
  // conversion decorator is added.
  template <DecoratorSet decorators>
  inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
                                     size_t length) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                                              length);
  }
1022 
  // Arraycopy over narrowOop buffers: statically known to be compressed,
  // so both compressed-oop decorators are added.
  template <DecoratorSet decorators>
  inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
                                     size_t length) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                                              length);
  }
1033 
1034   // Step 1: Set default decorators. This step remembers if a type was volatile
1035   // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1036   // memory ordering is set for the access, and the implied decorator rules
1037   // are applied to select sensible defaults for decorators that have not been
1038   // explicitly set. For example, default object referent strength is set to strong.
1039   // This step also decays the types passed in (e.g. getting rid of CV qualifiers
  // and references from the types). This step also performs some type verification
1041   // that the passed in types make sense.
1042 
  // Compile-time check that T is a valid value type for a primitive
  // access: pointers, integrals and floating-point types are accepted;
  // oop accesses are exempt (already validated upstream).
  template <DecoratorSet decorators, typename T>
  static void verify_types(){
    // If this fails to compile, then you have sent in something that is
    // not recognized as a valid primitive type to a primitive Access function.
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                   (std::is_pointer<T>::value || std::is_integral<T>::value) ||
                    std::is_floating_point<T>::value)); // not allowed primitive type
  }
1051 
  // Step 1 entry point for address-based stores: verifies T, decays P/T,
  // applies default decorators and hands off to type reduction (step 2).
  template <DecoratorSet decorators, typename P, typename T>
  inline void store(P* addr, T value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    DecayedT decayed_value = value;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_RELAXED by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_RELAXED | decorators) : decorators>::value;
    // const_cast strips any volatile/const qualifier carried by P.
    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
  }
1065 
  // Step 1 entry point for (base, offset) stores: verifies T, decays it,
  // fixes up decorators (adding compressed-oop conversion awareness for
  // oop values) and hands off to pre-runtime dispatch.
  template <DecoratorSet decorators, typename T>
  inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT decayed_value = value;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
  }
1076 
  // Step 1 entry point for address-based loads: verifies T, maps oop
  // results through OopOrNarrowOop, applies default decorators and hands
  // off to type reduction (step 2).
  template <DecoratorSet decorators, typename P, typename T>
  inline T load(P* addr) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                        typename OopOrNarrowOop<T>::type,
                                        std::decay_t<T>>;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_RELAXED by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_RELAXED | decorators) : decorators>::value;
    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
  }
1091 
  // Step 1 entry point for (base, offset) loads: verifies T, maps oop
  // results through OopOrNarrowOop, fixes up decorators and hands off to
  // pre-runtime dispatch.
  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                        typename OopOrNarrowOop<T>::type,
                                        std::decay_t<T>>;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }
1105 
  // Step 1 entry point for address-based compare-exchange: verifies T,
  // decays the operand types, defaults the memory ordering to MO_SEQ_CST
  // when none was requested, and hands off to type reduction (step 2).
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                            compare_decayed_value,
                                                            new_decayed_value);
  }
1120 
  // Step 1 entry point for (base, offset) compare-exchange: verifies T,
  // decays the operands, defaults the memory ordering to MO_SEQ_CST and
  // tags oop values for compressed-oop conversion before dispatch.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
                                                                   new_decayed_value);
  }
1138 
  // Step 1 entry point for address-based exchange: verifies T, decays the
  // operand types, forces MO_SEQ_CST and hands off to type reduction.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(P* addr, T new_value) {
    verify_types<decorators, T>();
    using DecayedP = std::decay_t<P>;
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                         new_decayed_value);
  }
1150 
  // Step 1 entry point for (base, offset) exchange: verifies T, decays it,
  // forces MO_SEQ_CST and tags oop values for compressed-oop conversion
  // before dispatch.
  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    verify_types<decorators, T>();
    using DecayedT = std::decay_t<T>;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
  }
1162 
  // Step 1 entry point for arraycopy: validates the element type (void is
  // allowed for type-erased copies), adds the IS_ARRAY and IN_HEAP
  // decorators and hands off to type reduction. Returns the success flag
  // from the underlying barrier.
  template <DecoratorSet decorators, typename T>
  inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                    std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
    using DecayedT = std::decay_t<T>;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
    // const_cast: the reduced dispatch uses a single (non-const) element
    // pointer type for both source and destination.
    return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                       dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                       length);
  }
1176 
  // Step 1 entry point for object clone: fixes up decorator defaults and
  // hands off to pre-runtime dispatch.
  template <DecoratorSet decorators>
  inline void clone(oop src, oop dst, size_t size) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
  }
1182 
1183   // Infer the type that should be returned from an Access::oop_load.
  template <typename P, DecoratorSet decorators>
  class OopLoadProxy: public StackObj {
  private:
    P *const _addr;  // address the deferred load will read from
  public:
    explicit OopLoadProxy(P* addr) : _addr(addr) {}

    // Implicit conversion performs the load as a (decoded) oop.
    inline operator oop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
    }

    // Implicit conversion performs the load as a narrowOop.
    inline operator narrowOop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
    }

    // Comparison operators load the value with the type inferred from the
    // other operand, so `proxy == some_oop` works without an explicit cast.
    template <typename T>
    inline bool operator ==(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
    }

    // nullptr comparisons load as oop.
    inline bool operator ==(std::nullptr_t) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) == nullptr;
    }

    inline bool operator !=(std::nullptr_t) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) != nullptr;
    }
  };
1217 
1218   // Infer the type that should be returned from an Access::load_at.
  template <DecoratorSet decorators>
  class LoadAtProxy: public StackObj {
  private:
    const oop _base;          // object holding the field
    const ptrdiff_t _offset;  // byte offset of the field within _base
  public:
    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    // Implicit conversion performs the load with T inferred from the
    // context in which the proxy is used.
    template <typename T>
    inline operator T() const {
      return load_at<decorators, T>(_base, _offset);
    }

    // Comparisons load the value with T inferred from the other operand.
    template <typename T>
    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

    template <typename T>
    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
  };
1238 
1239   // Infer the type that should be returned from an Access::oop_load_at.
  template <DecoratorSet decorators>
  class OopLoadAtProxy: public StackObj {
  private:
    const oop _base;          // object holding the field
    const ptrdiff_t _offset;  // byte offset of the field within _base
  public:
    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    // Implicit conversion performs the load as a (decoded) oop.
    inline operator oop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
    }

    // Implicit conversion performs the load as a narrowOop.
    inline operator narrowOop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
    }

    // Comparisons load the value with T inferred from the other operand.
    template <typename T>
    inline bool operator ==(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
    }
  };
1266 }
1267 
1268 #endif // SHARE_OOPS_ACCESSBACKEND_HPP