< prev index next >

src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp

Print this page




  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
  27 #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
  28 
  29 #include "vm_version_aarch64.hpp"
  30 
  31 // Implementation of class atomic
  32 
  33 #define FULL_MEM_BARRIER  __sync_synchronize()
  34 #define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
  35 #define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
  36 
// Atomic add-and-fetch for Linux/AArch64.
// HotSpot's atomic RMW operations are required to provide conservative
// (full two-way barrier) ordering regardless of the 'order' argument,
// which is why 'order' is deliberately ignored here.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
    // Perform the RMW with release semantics, then issue a full fence
    // (FULL_MEM_BARRIER == __sync_synchronize()).  The release RMW followed
    // by the trailing full barrier gives full two-way barrier semantics
    // without relying on the compiler's seq-cst mapping for the RMW itself.
    // NOTE(review): statement order here (RMW before fence) is load-bearing —
    // do not reorder.
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }
};
  48 
// Atomic exchange for Linux/AArch64: store 'exchange_value' into *dest and
// return the previous contents.  The 'order' argument is ignored; HotSpot
// requires conservative full-barrier semantics from xchg.
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
                                                     T volatile* dest,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  // Per the GCC builtins documentation, __sync_lock_test_and_set is only an
  // *acquire* barrier, so a trailing full fence is needed to upgrade the
  // operation to the full two-way barrier HotSpot expects.
  T res = __sync_lock_test_and_set(dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}
  59 
  60 template<size_t byte_size>
  61 template<typename T>
  62 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
  63                                                         T volatile* dest,
  64                                                         T compare_value,
  65                                                         atomic_memory_order order) const {


  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
  27 #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
  28 
  29 #include "vm_version_aarch64.hpp"
  30 
  31 // Implementation of class atomic
  32 
  33 #define FULL_MEM_BARRIER  __sync_synchronize()
  34 #define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
  35 #define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
  36 
  37 template<size_t byte_size>
  38 struct Atomic::PlatformAdd
  39   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  40 {
  41   template<typename I, typename D>
  42   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
  43     return __sync_add_and_fetch(dest, add_value);


  44   }
  45 };
  46 
// Atomic exchange for Linux/AArch64: store 'exchange_value' into *dest and
// return the previous contents.  The 'order' argument is ignored; HotSpot
// requires conservative full-barrier semantics from xchg.
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
                                                     T volatile* dest,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  // Per the GCC builtins documentation, __sync_lock_test_and_set is only an
  // *acquire* barrier, so a trailing full fence is needed to upgrade the
  // operation to the full two-way barrier HotSpot expects.
  T res = __sync_lock_test_and_set(dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}
  57 
  58 template<size_t byte_size>
  59 template<typename T>
  60 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
  61                                                         T volatile* dest,
  62                                                         T compare_value,
  63                                                         atomic_memory_order order) const {
< prev index next >