atomic.h revision 91469
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/include/atomic.h 91469 2002-02-28 06:17:05Z bmilekic $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/*
 * Various simple arithmetic operations on memory, each of which is
 * atomic in the presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 * atomic_readandclear_int(P)	(return *(u_int*)P; *(u_int*)P = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return *(u_long*)P; *(u_long*)P = 0;)
 */
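
/*
 * Illustrative usage sketch (not part of this interface): a caller
 * might maintain a shared flags word with these primitives instead of
 * taking a lock.  The names "sc_flags" and "SC_BUSY" are hypothetical.
 *
 *	static volatile u_int sc_flags;
 *	#define SC_BUSY		0x0001
 *
 *	atomic_set_int(&sc_flags, SC_BUSY);	-- mark busy
 *	...
 *	atomic_clear_int(&sc_flags, SC_BUSY);	-- mark idle again
 */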

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);

#else /* !KLD_MODULE */

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

/*
 * The assembly is marked volatile to demarcate potential before-and-after
 * side effects if an interrupt or SMP collision were to occur.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(__XSTRING(MPLOCKED) OP		\
			 : "+m" (*p)			\
			 : CONS (V));			\
}

/*
 * Atomic compare and set, used by the mutex functions.
 *
 *	if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#if defined(I386_CPU)
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%0,%2 ;		"
	"	jne	1f ;		"
	"	movl	%1,%2 ;		"
	"1:				"
	"	sete	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}
#else /* defined(I386_CPU) */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile (
	"	" __XSTRING(MPLOCKED) "	"
	"	cmpxchgl %1,%2 ;	"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}
#endif /* defined(I386_CPU) */
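
/*
 * Illustrative usage sketch (not part of this interface):
 * atomic_cmpset_int() is the building block the mutex code spins on.
 * A minimal spin-acquire around a hypothetical lock word "lk"
 * (0 = free, 1 = held) would look like:
 *
 *	static volatile u_int lk;
 *
 *	while (atomic_cmpset_int(&lk, 0, 1) == 0)
 *		;				-- spin until 0 -> 1 wins
 *	... critical section ...
 *	atomic_store_rel_int(&lk, 0);		-- release
 */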

#if defined(I386_CPU)
/*
 * We assume that a = b will do atomic loads and stores.
 *
 * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
 * memory ordering.  These should only be used on a 386.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
	__asm __volatile("" : : : "memory");		\
}
#else

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(__XSTRING(MPLOCKED) LOP	\
			 : "=a" (res),	/* 0 (result) */\
			   "+m" (*p)	/* 1 */		\
			 : : "memory");			\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
			 : "+m" (*p),	/* 0 */		\
			   "+r" (v)	/* 1 */		\
			 : : "memory");			\
}
#endif /* defined(I386_CPU) */
#endif /* KLD_MODULE */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v)
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v)
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v)
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v)

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v)
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v)
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v)

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v)
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v)
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v)
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v)

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v)
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v)
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v)
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v)

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0")
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0")
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0")
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0")

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD
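
/*
 * Illustrative usage sketch (not part of this interface): the _acq/_rel
 * variants order the surrounding accesses.  In the hypothetical
 * producer/consumer below, the consumer sees "data" as 42 once it
 * observes "ready" set, because the store is a release and the load an
 * acquire (modulo the I386_CPU caveat noted above):
 *
 *	static u_int data;
 *	static volatile u_int ready;
 *
 *	Producer:
 *		data = 42;
 *		atomic_store_rel_int(&ready, 1);
 *
 *	Consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			;
 *		-- here data == 42
 */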

#define atomic_set_acq_char		atomic_set_char
#define atomic_set_rel_char		atomic_set_char
#define atomic_clear_acq_char		atomic_clear_char
#define atomic_clear_rel_char		atomic_clear_char
#define atomic_add_acq_char		atomic_add_char
#define atomic_add_rel_char		atomic_add_char
#define atomic_subtract_acq_char	atomic_subtract_char
#define atomic_subtract_rel_char	atomic_subtract_char

#define atomic_set_acq_short		atomic_set_short
#define atomic_set_rel_short		atomic_set_short
#define atomic_clear_acq_short		atomic_clear_short
#define atomic_clear_rel_short		atomic_clear_short
#define atomic_add_acq_short		atomic_add_short
#define atomic_add_rel_short		atomic_add_short
#define atomic_subtract_acq_short	atomic_subtract_short
#define atomic_subtract_rel_short	atomic_subtract_short

#define atomic_set_acq_int		atomic_set_int
#define atomic_set_rel_int		atomic_set_int
#define atomic_clear_acq_int		atomic_clear_int
#define atomic_clear_rel_int		atomic_clear_int
#define atomic_add_acq_int		atomic_add_int
#define atomic_add_rel_int		atomic_add_int
#define atomic_subtract_acq_int		atomic_subtract_int
#define atomic_subtract_rel_int		atomic_subtract_int
#define atomic_cmpset_acq_int		atomic_cmpset_int
#define atomic_cmpset_rel_int		atomic_cmpset_int

#define atomic_set_acq_long		atomic_set_long
#define atomic_set_rel_long		atomic_set_long
#define atomic_clear_acq_long		atomic_clear_long
#define atomic_clear_rel_long		atomic_clear_long
#define atomic_add_acq_long		atomic_add_long
#define atomic_add_rel_long		atomic_add_long
#define atomic_subtract_acq_long	atomic_subtract_long
#define atomic_subtract_rel_long	atomic_subtract_long
#define atomic_cmpset_long		atomic_cmpset_int
#define atomic_cmpset_acq_long		atomic_cmpset_acq_int
#define atomic_cmpset_rel_long		atomic_cmpset_rel_int

#define atomic_cmpset_acq_ptr		atomic_cmpset_ptr
#define atomic_cmpset_rel_ptr		atomic_cmpset_ptr

#define atomic_set_8		atomic_set_char
#define atomic_set_acq_8	atomic_set_acq_char
#define atomic_set_rel_8	atomic_set_rel_char
#define atomic_clear_8		atomic_clear_char
#define atomic_clear_acq_8	atomic_clear_acq_char
#define atomic_clear_rel_8	atomic_clear_rel_char
#define atomic_add_8		atomic_add_char
#define atomic_add_acq_8	atomic_add_acq_char
#define atomic_add_rel_8	atomic_add_rel_char
#define atomic_subtract_8	atomic_subtract_char
#define atomic_subtract_acq_8	atomic_subtract_acq_char
#define atomic_subtract_rel_8	atomic_subtract_rel_char
#define atomic_load_acq_8	atomic_load_acq_char
#define atomic_store_rel_8	atomic_store_rel_char

#define atomic_set_16		atomic_set_short
#define atomic_set_acq_16	atomic_set_acq_short
#define atomic_set_rel_16	atomic_set_rel_short
#define atomic_clear_16		atomic_clear_short
#define atomic_clear_acq_16	atomic_clear_acq_short
#define atomic_clear_rel_16	atomic_clear_rel_short
#define atomic_add_16		atomic_add_short
#define atomic_add_acq_16	atomic_add_acq_short
#define atomic_add_rel_16	atomic_add_rel_short
#define atomic_subtract_16	atomic_subtract_short
#define atomic_subtract_acq_16	atomic_subtract_acq_short
#define atomic_subtract_rel_16	atomic_subtract_rel_short
#define atomic_load_acq_16	atomic_load_acq_short
#define atomic_store_rel_16	atomic_store_rel_short

#define atomic_set_32		atomic_set_int
#define atomic_set_acq_32	atomic_set_acq_int
#define atomic_set_rel_32	atomic_set_rel_int
#define atomic_clear_32		atomic_clear_int
#define atomic_clear_acq_32	atomic_clear_acq_int
#define atomic_clear_rel_32	atomic_clear_rel_int
#define atomic_add_32		atomic_add_int
#define atomic_add_acq_32	atomic_add_acq_int
#define atomic_add_rel_32	atomic_add_rel_int
#define atomic_subtract_32	atomic_subtract_int
#define atomic_subtract_acq_32	atomic_subtract_acq_int
#define atomic_subtract_rel_32	atomic_subtract_rel_int
#define atomic_load_acq_32	atomic_load_acq_int
#define atomic_store_rel_32	atomic_store_rel_int
#define atomic_cmpset_32	atomic_cmpset_int
#define atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define atomic_readandclear_32	atomic_readandclear_int

#if !defined(WANT_FUNCTIONS)
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
	return (void *)atomic_load_acq_int((volatile u_int *)p);
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
	atomic_store_rel_int((volatile u_int *)p, (u_int)v);
}

#define ATOMIC_PTR(NAME)				\
static __inline void					\
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_int((volatile u_int *)p, v);	\
}							\
							\
static __inline void					\
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_acq_int((volatile u_int *)p, v);\
}							\
							\
static __inline void					\
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_rel_int((volatile u_int *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "=&r" (result),		/* 0 (result) */
	  "+m" (*addr));		/* 1 (addr) */

	return (result);
}
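
/*
 * Illustrative usage sketch (not part of this interface):
 * atomic_readandclear_int() atomically swaps in zero, which suits
 * draining a mask of pending work.  The name "pending" is hypothetical.
 *
 *	static volatile u_int pending;
 *
 *	u_int work = atomic_readandclear_int(&pending);
 *	while (work != 0) {
 *		... handle lowest set bit ...
 *		work &= work - 1;	-- clear lowest set bit
 *	}
 */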

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "=&r" (result),		/* 0 (result) */
	  "+m" (*addr));		/* 1 (addr) */

	return (result);
}
#endif /* !defined(WANT_FUNCTIONS) */
#endif /* ! _MACHINE_ATOMIC_H_ */