/*
 * x86 version of "atomic_dec_and_lock()" using
 * the atomic "cmpxchg" instruction.
 *
 * (For CPUs lacking cmpxchg, we use the slow
 * generic version, and this one never even gets
 * compiled.)
 */
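/*
 * The point of this primitive: decrement the count and, if it hits
 * zero, return 1 with the spinlock held; in the common (nonzero)
 * case, return 0 without ever touching the lock.  Only a 1 -> 0
 * transition falls back to the locked slow path.  A usage sketch
 * follows the function below.
 */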

#include <linux/spinlock.h>
#include <asm/atomic.h>

int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	int counter;
	int newcount;

repeat:
	counter = atomic_read(atomic);
	newcount = counter-1;

	/*
	 * A 1 -> 0 transition must happen under the lock, so the
	 * caller holds it when the last reference goes away.
	 */
	if (!newcount)
		goto slow_path;

	/* Atomically replace the count with "newcount" if it still
	   holds "counter"; cmpxchg writes the memory operand, hence "+m" */
	asm volatile("lock; cmpxchgl %2,%1"
		: "=a" (newcount), "+m" (atomic->counter)
		: "r" (newcount), "0" (counter)
		: "cc");

	/* If the cmpxchg failed, "eax" (and thus newcount) now
	   holds the fresh count: retry */
	if (newcount != counter)
		goto repeat;
	return 0;

slow_path:
	spin_lock(lock);
	/*
	 * Re-check under the lock: someone may have taken another
	 * reference since we looked.  Return 1 (lock held) only if
	 * this decrement really was the final one.
	 */
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

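/*
 * Usage sketch (not part of the original file): the classic caller
 * pattern this primitive exists for.  "struct foo", "foo_list_lock"
 * and foo_put() are hypothetical stand-ins; a real caller would also
 * need <linux/list.h> and <linux/slab.h>.
 */
#if 0	/* illustrative only, not compiled */
static spinlock_t foo_list_lock = SPIN_LOCK_UNLOCKED;

struct foo {
	atomic_t refcount;
	struct list_head list;
};

static void foo_put(struct foo *f)
{
	/* Fast path: count stays nonzero, lock never touched */
	if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
		/* Final reference: we get here holding the lock,
		   so nobody can find "f" and re-take a reference
		   while we unlink and free it */
		list_del(&f->list);
		spin_unlock(&foo_list_lock);
		kfree(f);
	}
}
#endif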