/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/asm/atomic.h 361181 2020-05-18 09:12:13Z hselasky $
 */

#ifndef	_ASM_ATOMIC_H_
#define	_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}
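/*
 * Example use of the 32-bit atomic API above (an illustrative sketch,
 * not part of the original header; obj_hold(), obj_drop() and
 * obj_release() are hypothetical names):
 *
 *	static atomic_t obj_refs = ATOMIC_INIT(1);
 *
 *	void
 *	obj_hold(void)
 *	{
 *		atomic_inc(&obj_refs);
 *	}
 *
 *	void
 *	obj_drop(void)
 *	{
 *		if (atomic_dec_and_test(&obj_refs))
 *			obj_release();
 *	}
 *
 * atomic_dec_and_test() returns true only for the caller that drops
 * the counter to zero, so obj_release() runs exactly once.
 */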
/*
 * Add "a" to "v" unless "v" equals "u"; returns non-zero if the add
 * was performed.
 */
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c != u);
}

/* Like atomic_add_unless(), but returns the value observed before the add. */
static inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

static inline int
atomic_xchg(atomic_t *v, int i)
{
#if defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	return (atomic_swap_int(&v->counter, i));
#else
	int ret = atomic_read(v);

	while (!atomic_fcmpset_int(&v->counter, &ret, i))
		;
	return (ret);
#endif
}

/*
 * Atomically set "v" to "new" if it equals "old"; always returns the
 * value previously read from "v".
 */
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_fcmpset_int(&v->counter, &ret, new))
			break;
		if (ret != old)
			break;
	}
	return (ret);
}

#if defined(__i386__) || defined(__amd64__)
#define	LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define	LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_8(...)
#define	LINUXKPI_ATOMIC_16(...)
#endif

#if defined(__amd64__) || (defined(__ARM_ARCH) && (__ARM_ARCH >= 6)) || \
    defined(__aarch64__) || defined(__powerpc64__) || defined(__riscv)
#define	LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_64(...)
#endif

/*
 * Linux-style cmpxchg(): atomically set *ptr to "new" if it equals
 * "old"; always returns the value previously read from *ptr.  Only the
 * operand sizes supported by the current architecture compile.
 */
#define	cmpxchg(ptr, old, new) ({					\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret = { .val = (old) }, __new = { .val = (new) };		\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	case 4:								\
		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
			;						\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)
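/*
 * Example use of cmpxchg() (an illustrative sketch, not part of the
 * original header; "initialized" and do_one_time_setup() are
 * hypothetical names): claim a one-shot initialization atomically.
 * Since cmpxchg() returns the value previously stored, reading back 0
 * means this caller won the race and may run the setup.
 *
 *	static u32 initialized;
 *
 *	if (cmpxchg(&initialized, 0, 1) == 0)
 *		do_one_time_setup();
 */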
/*
 * Linux-style xchg(): atomically store "new" into *ptr and return the
 * previous value.
 */
#define	xchg(ptr, new) ({						\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret, __new = { .val = (new) };				\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]))				\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]))				\
			;						\
		break;							\
	)								\
	case 4:								\
		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
		    __new.u32[0]);					\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
		    __new.u64[0]);					\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int retval;
	int old;

	old = atomic_read(v);
	for (;;) {
		retval = old - 1;
		if (unlikely(retval < 0))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
			break;
	}
	return (retval);
}

/*
 * Generate atomic_{or,and,andnot,xor}() and the corresponding
 * atomic_fetch_*() variants from an atomic_cmpxchg() retry loop.
 */
#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)
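/*
 * Example use of the generated helpers (an illustrative sketch, not
 * part of the original header; FLAG_BUSY is a hypothetical bit mask
 * and busy_was_set() a hypothetical handler):
 *
 *	static atomic_t flags = ATOMIC_INIT(0);
 *
 *	atomic_or(FLAG_BUSY, &flags);
 *	if (atomic_fetch_andnot(FLAG_BUSY, &flags) & FLAG_BUSY)
 *		busy_was_set();
 *
 * atomic_fetch_andnot() returns the value observed before the bits
 * were cleared, so the test reports whether FLAG_BUSY had been set.
 */

#endif	/* _ASM_ATOMIC_H_ */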