/*
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
#define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "vm_version_zero.hpp"

// Implementation of class atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Returns oldval on success; on failure it returns the value of *ptr
 * observed by the CAS, which is necessarily different from oldval.
 * This implementation is processor specific and works on
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernel helper, as the ARM port below does, would be a better
 * way to cover the whole architecture family.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  // cas Dc,Du,<ea>: if <ea> == Dc then <ea> = Du, else Dc = <ea>.
  // ret is tied to oldval on input, so it is only rewritten on failure.
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      // __m68k_cmpxchg returns its compare operand (prev) unchanged
      // when the exchange succeeded.
      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until success.

      int prev = *ptr;

      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until success.
      int prev = *ptr;

      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        return prev;
    }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
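
// Note: 0xffff0fc0 is the fixed address of the "kuser" cmpxchg helper
// that the ARM Linux kernel maps into every process at the top of the
// address space.  As an illustrative sketch only (this check is not
// something this file currently performs, and the function name is
// hypothetical): the kernel publishes the number of available helpers
// at 0xffff0ffc, and kuser_cmpxchg requires helper version 2 or later,
// so a caller could verify at startup that the helper exists.
static inline bool arm_has_kernel_cmpxchg() {
  // __kuser_helper_version lives at 0xffff0ffc in the vector page.
  int version = *(volatile int *) 0xffff0ffc;
  return version >= 2;  // kuser_cmpxchg was added in helper version 2.
}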
/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval,
                                       int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.

      int prev = *ptr;

      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.
      int prev = *ptr;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        return prev;
    }
}
#endif // ARM

inline void Atomic::store(jint store_value, volatile jint* dest) {
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  *dest = store_value;
}

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  jint result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp).  However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier.  Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}
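
// As an aside (not used by this file): GCC 4.7 and later expose the
// same exchange through the __atomic builtins, where the memory order
// is explicit and __ATOMIC_SEQ_CST folds the trailing full barrier
// into the operation itself.  A minimal sketch, assuming a compiler
// with those builtins; the helper name is illustrative only.
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
static inline jint zero_xchg_seq_cst(volatile jint* dest, jint exchange_value) {
  // Sequentially consistent exchange; no separate __sync_synchronize()
  // is required afterwards.
  return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
}
#endif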
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value) {
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value);
}

// Plain 64-bit loads and stores are not guaranteed atomic on 32-bit
// Zero targets, so jlong accesses go through os::atomic_copy64().
inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}

#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP