/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP
#define OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP

#include "runtime/vm_version.hpp"

// Implementation of class atomic

// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/

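// Arithmetic RMW: the add is performed with release semantics and is then
// followed by FULL_MEM_BARRIER, so it is ordered against both earlier and
// later accesses regardless of the requested `order`. This implements
// HotSpot's conservative ordering on RISC-V (see the note above).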
template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};

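// Exchange uses the same release-RMW-plus-full-fence pattern as PlatformAdd,
// again promoting any requested `order` to conservative (two-way) ordering.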
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
                                                     T exchange_value,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

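// Generic compare-and-exchange: a relaxed CAS bracketed by full fences when
// anything stronger than memory_order_relaxed is requested. The leading fence
// orders prior accesses before the CAS, and the trailing fence orders the CAS
// before subsequent accesses, which together give conservative ordering.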
// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
                                                        T compare_value,
                                                        T exchange_value,
                                                        atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T value = compare_value;
  if (order != memory_order_relaxed) {
    FULL_MEM_BARRIER;
  }

  __atomic_compare_exchange(dest, &value, &exchange_value, /* weak */ false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);

  if (order != memory_order_relaxed) {
    FULL_MEM_BARRIER;
  }
  return value;
}

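// 4-byte specialization using an explicit lr.w/sc.w retry loop. lr.w
// sign-extends the loaded 32-bit value to 64 bits, so compare_value must be
// sign-extended as well (the addiw below); otherwise the bne comparison would
// spuriously fail for values with bit 31 set.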
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)),
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  if (order != memory_order_relaxed) {
    FULL_MEM_BARRIER;
  }
  T rv;
  int tmp;
  __asm volatile(
    "1:\n\t"
    " addiw     %[tmp], %[cv], 0\n\t" // sign-extend compare_value to match the sign-extended lr.w result
    " lr.w.aq   %[rv], (%[dest])\n\t"
    " bne       %[rv], %[tmp], 2f\n\t"
    " sc.w.rl   %[tmp], %[ev], (%[dest])\n\t"
    " bnez      %[tmp], 1b\n\t"
    "2:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  if (order != memory_order_relaxed) {
    FULL_MEM_BARRIER;
  }
  return rv;
}

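// Acquire loads and release stores map directly onto the GCC __atomic
// builtins; RELEASE_X_FENCE additionally issues a trailing full fence, per
// the note at the top of this file.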
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};

#endif // OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP