#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

/* It seems that people are forgetting to
 * initialize their spinlocks properly, tsk tsk.
 * Remember to turn this off in 2.4. -ben
 */
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG	1
#else
#define SPINLOCK_DEBUG	0
#endif

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#if SPINLOCK_DEBUG
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
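
/*
 * Usage sketch, added for clarity (not part of the original header;
 * "my_lock" and "obj" are illustrative names).  A spinlock is either
 * set up statically:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 * or at run time, e.g. for locks embedded in dynamically allocated
 * objects:
 *
 *	spin_lock_init(&obj->lock);
 */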

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

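/*
 * Commentary added for clarity (not in the original): the locked
 * "decb" below atomically decrements the lock byte; starting from 1
 * (unlocked) it reaches 0 and we own the lock.  If the result is
 * negative the lock was already held, and we fall into the
 * out-of-line section, spinning with plain reads and "rep;nop"
 * (the PAUSE hint) until the byte goes positive again, then jump
 * back and retry the atomic decrement.
 */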
#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"cmpb $0,%0\n\t" \
	"rep;nop\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END

/*
 * A plain byte store is enough to release the lock on most x86 CPUs:
 * stores are not reordered with earlier loads or stores, so the whole
 * critical section is visible before the unlock is.  The exceptions
 * are PPro SMP (errata 66, 92) and CPUs built for out-of-order stores
 * (CONFIG_X86_OOSTORE); those take the locked xchgb variant below.
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \
	"movb $1,%0" \
		:"=m" (lock->lock) : : "memory"


static inline void spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	if (lock->magic != SPINLOCK_MAGIC)
		BUG();
	if (!spin_is_locked(lock))
		BUG();
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#else

#define spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "=m" (lock->lock) \
		:"0" (oldval) : "memory"

static inline void spin_unlock(spinlock_t *lock)
{
	char oldval = 1;
#if SPINLOCK_DEBUG
	if (lock->magic != SPINLOCK_MAGIC)
		BUG();
	if (!spin_is_locked(lock))
		BUG();
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#endif

static inline int spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->lock)
		:"0" (0) : "memory");
	return oldval > 0;
}

static inline void spin_lock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	__label__ here;
here:
	if (lock->magic != SPINLOCK_MAGIC) {
		printk("eip: %p\n", &&here);
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->lock) : : "memory");
}
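
/*
 * Usage sketch, added for clarity (not part of the original header;
 * "my_lock" is an illustrative name).  The irq- and bh-disabling
 * wrappers (spin_lock_irqsave() and friends) live in
 * <linux/spinlock.h>; the bare operations look like:
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 *
 *	if (spin_trylock(&my_lock)) {
 *		... lock taken without spinning ...
 *		spin_unlock(&my_lock);
 *	}
 */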


/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#if SPINLOCK_DEBUG
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
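
/*
 * Added commentary (not in the original): RW_LOCK_BIAS (0x01000000 in
 * <asm/rwlock.h>) is the unlocked value of the counter.  Each reader
 * atomically subtracts 1 and each writer subtracts the whole bias, so
 * the count hits exactly zero only for an uncontended writer and goes
 * negative whenever readers and a writer collide.  The out-of-line
 * __read_lock_failed/__write_lock_failed helpers undo the decrement
 * and spin until a retry can succeed.
 */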

static inline void read_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		BUG();
#endif
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void write_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		BUG();
#endif
	__build_write_lock(rw, "__write_lock_failed");
}

#define read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

static inline int write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
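
/*
 * Usage sketch, added for clarity (not part of the original header;
 * "my_rwlock" is an illustrative name).  Per the note above, readers
 * may take the plain read_lock() even from interrupt context as long
 * as every writer disables interrupts, e.g. with write_lock_irqsave()
 * from <linux/spinlock.h>:
 *
 *	unsigned long flags;
 *
 *	write_lock_irqsave(&my_rwlock, flags);
 *	... modify the shared data ...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	read_lock(&my_rwlock);
 *	... read the shared data ...
 *	read_unlock(&my_rwlock);
 */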

#endif /* __ASM_SPINLOCK_H */