#ifndef __ARCH_X86_64_ATOMIC__
#define __ARCH_X86_64_ATOMIC__

#include <linux/config.h>

/* atomic_t should be a 32-bit signed type */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }
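
/*
 * Illustrative usage (not part of the original interface): ATOMIC_INIT()
 * gives a compile-time initial value, so an atomic_t can be defined
 * statically.  The variable name below is hypothetical.
 *
 *	static atomic_t nr_active_users = ATOMIC_INIT(0);
 */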

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
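
/*
 * Illustrative usage (not part of the original interface): atomic_read()
 * and atomic_set() are plain loads/stores of the counter; they rely on
 * aligned 32-bit accesses being single instructions rather than on the
 * lock prefix.  The names below are hypothetical.
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	atomic_set(&v, 10);
 *	if (atomic_read(&v) == 10)
 *		do_something();
 */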

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
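
/*
 * Illustrative usage (not part of the original interface): atomic_add()
 * and atomic_sub() update the counter with a single read-modify-write
 * instruction (lock-prefixed on SMP) but do not return the new value, so
 * they suit statistics-style counters.  The names below are hypothetical.
 *
 *	static atomic_t bytes_pending = ATOMIC_INIT(0);
 *
 *	atomic_add(len, &bytes_pending);
 *	...
 *	atomic_sub(len, &bytes_pending);
 */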

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
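
/*
 * Illustrative usage (not part of the original interface): dropping
 * several references at once and acting only when the count reaches
 * zero.  The names below are hypothetical.
 *
 *	if (atomic_sub_and_test(nr_refs, &obj->count))
 *		release_object(obj);
 */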

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
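
/*
 * Illustrative usage (not part of the original interface): the canonical
 * reference-count "put" pattern.  Exactly one caller observes the
 * transition to zero and may tear the object down.  The names below are
 * hypothetical.
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcount))
 *			obj_destroy(o);
 *	}
 */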

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
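
/*
 * Illustrative usage (not part of the original interface): a credit-style
 * balance where a consumer subtracts its cost and checks whether the
 * balance dropped below zero.  The names below are hypothetical.
 *
 *	if (atomic_add_negative(-cost, &budget))
 *		handle_overcommit();
 */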

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*addr) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
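
/*
 * Illustrative note (not part of the original interface): because
 * lock-prefixed instructions already order memory on x86, these helpers
 * only need to stop the compiler from reordering.  Portable callers still
 * bracket the atomic operation, e.g. (variable name hypothetical):
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&v);
 *	smp_mb__after_atomic_dec();
 */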

#endif