/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
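
/*
 * Illustrative usage (annotation added to this copy, not part of the
 * original header; "nr_events" is a hypothetical counter):
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_events);
 *	if (atomic_read(&nr_events) > 0)
 *		atomic_set(&nr_events, 0);
 */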

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
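
/*
 * Annotation: ldrex marks the address for exclusive access, and strex
 * only succeeds (writes 0 to %1) if nothing else touched the location
 * in between; on failure we branch back to 1: and retry.  Note that
 * atomic_add() contains no smp_mb(), so like the other non-value-
 * returning atomics it provides no memory-ordering guarantees of its own.
 */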

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
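
/*
 * Annotation: by kernel convention, value-returning atomics act as full
 * memory barriers, which is why atomic_add_return() brackets the
 * ldrex/strex loop with smp_mb() while plain atomic_add() does not.
 */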

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
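
/*
 * Illustrative use of atomic_cmpxchg() (annotation added to this copy;
 * "limit" is a hypothetical bound): retry until the update was based on
 * the value actually observed in the counter.
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		if (old >= limit)
 *			break;
 *		new = old + 1;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */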

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
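
/*
 * Annotation: the fallbacks below only ever run on uniprocessor systems
 * (SMP is ruled out above), so disabling interrupts around the
 * read-modify-write is enough to make it atomic with respect to
 * everything else that can run on the single CPU.
 */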

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
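
/*
 * Illustrative use of atomic_inc_not_zero() (annotation added to this
 * copy; "obj" and "refcnt" are hypothetical): take a reference only if
 * the refcount has not already dropped to zero, e.g. during a lockless
 * lookup:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */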

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;
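
/*
 * Annotation: the __aligned(8) matters because the ldrexd/strexd
 * instructions used by the 64-bit ops below require a doubleword-aligned
 * address; an unaligned counter would fault.
 */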

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
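
/*
 * Annotation: the code relies on a single ldrexd loading both words of
 * the counter as one atomic access, so atomic64_read() needs no retry
 * loop; nothing is stored, hence no strexd either.
 */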

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
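
/*
 * Annotation: unlike the 32-bit atomic_set(), a plain 64-bit store is
 * not guaranteed to be atomic here (it could be split into two 32-bit
 * stores), so atomic64_set() uses an ldrexd/strexd loop to publish both
 * halves as a single unit.
 */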

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
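
/*
 * Annotation: atomic64_dec_if_positive() computes v - 1 and only stores
 * it when the result is non-negative (the bmi skips the strexd when the
 * sign bit of the high word is set).  The decremented value is returned
 * either way, so a return value < 0 means the counter was left untouched.
 */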

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
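
/*
 * Annotation: returns non-zero if the add was performed and zero if v
 * already equalled u; the trailing smp_mb() is only needed, and only
 * executed, when the counter was actually modified.
 */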

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#else /* !CONFIG_GENERIC_ATOMIC64 */
#include <asm-generic/atomic64.h>
#endif
#include <asm-generic/atomic-long.h>
#endif
#endif