#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/config.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif
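
/*
 * On SMP the "lock" prefix above makes each read-modify-write
 * instruction atomic with respect to the other processors.  On UP
 * kernels it is omitted: a single instruction cannot be interrupted
 * halfway, so the prefix would only waste bus cycles.
 */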

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
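
/*
 * Example (illustrative only; the counter name is made up):
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_widgets, 5);
 *	printk("widgets: %d\n", atomic_read(&nr_widgets));
 */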

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Note that the guaranteed useful range
 * of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

#if CPU != 386
/**
 * atomic_add_return - add integer and return the result
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value.  Not available
 * on the 386, which lacks the xadd instruction.
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	register int oldval;
	__asm__ __volatile__(
		LOCK "xaddl %2,%0"
		:"=m" (v->counter), "=r" (oldval)
		:"1" (i), "m" (v->counter) : "memory");
	return oldval + i;
}
#endif
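
/*
 * Example of the return-value semantics (illustrative only):
 *
 *	atomic_t a = ATOMIC_INIT(3);
 *	int n;
 *
 *	atomic_add(2, &a);		 a is now 5, nothing is returned
 *	n = atomic_add_return(4, &a);	 a is now 9 and n == 9
 */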

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
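
/*
 * Example (illustrative only; obj, obj->users and free_obj() are made
 * up): drop several references at once and free on the last one.
 *
 *	if (atomic_sub_and_test(nr_refs, &obj->users))
 *		free_obj(obj);
 */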

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
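
/*
 * Example (illustrative only; the object and its fields are made up):
 * the classic reference-count pattern.
 *
 *	atomic_inc(&obj->refcnt);			 take a reference
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))		 drop it
 *		kfree(obj);				 last user frees
 */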

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.  Note that the
 * guaranteed useful range of an atomic_t is only 24 bits.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" (mask),"m" (*addr) : "memory")
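
/*
 * Example (illustrative only; the flags word and bit mask are made up).
 * Note that these operate on a plain integer word, not on an atomic_t:
 *
 *	unsigned long flags_word = 0;
 *
 *	atomic_set_mask(0x01, &flags_word);	 set bit 0
 *	atomic_clear_mask(0x01, &flags_word);	 clear it again
 */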

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
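
/*
 * Example (illustrative only; the fields and DONE value are made up):
 * on x86 the locked decrement is already a full memory barrier, so
 * these macros expand to a compiler barrier only.
 *
 *	obj->status = DONE;
 *	smp_mb__before_atomic_dec();	 order the store vs. the dec
 *	atomic_dec(&obj->pending);
 */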

#endif /* __ARCH_I386_ATOMIC__ */