#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/kernel.h>
#include <linux/config.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

/* It seems that people are forgetting to
 * initialize their spinlocks properly, tsk tsk.
 * Remember to turn this off in 2.4. -ben
 */
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG	1
#else
#define SPINLOCK_DEBUG	0
#endif

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#if SPINLOCK_DEBUG
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
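
/*
 * Illustrative usage only (the names below are placeholders, not part
 * of this header): a spinlock can be initialized statically,
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 * or at run time, e.g. for a lock embedded in a dynamically
 * allocated structure:
 *
 *	spin_lock_init(&obj->lock);
 */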

/*
 * Simple spin lock operations.  There are two variants, one which
 * clears IRQs on the local processor and one which does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
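
/*
 * Illustrative usage only ("my_lock" is a placeholder and the
 * irq-disabling wrappers are provided by <linux/spinlock.h>, not by
 * this file).  The plain variant:
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 *
 * and the irq-clearing variant:
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */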

#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

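/*
 * How spin_lock_string works: "lock ; decb" atomically decrements the
 * lock byte.  Starting from the unlocked value 1 the result is 0, the
 * sign flag stays clear and we fall through with the lock held.  If
 * the byte goes negative the lock was already taken, so we jump to an
 * out-of-line loop (placed in the .text.lock section) that spins on a
 * plain read plus "rep;nop" until the byte turns positive again, then
 * retries the atomic decrement.
 */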
#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	".section .text.lock,\"ax\"\n" \
	"2:\t" \
	"cmpb $0,%0\n\t" \
	"rep;nop\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	".previous"

/*
 * This works. Despite all the confusion.
 */
#define spin_unlock_string \
	"movb $1,%0"

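/*
 * spin_trylock: atomically exchange a 0 into the lock byte.  The old
 * value is positive only if the lock was free, so "oldval > 0" means
 * we have just acquired it; otherwise the lock was already held and
 * the caller must not enter the critical section.
 */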
static inline int spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->lock)
		:"0" (0) : "memory");
	return oldval > 0;
}

static inline void spin_lock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	__label__ here;
here:
	if (lock->magic != SPINLOCK_MAGIC) {
		printk("eip: %p\n", &&here);
		out_of_line_bug();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->lock) : : "memory");
}

static inline void spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	if (lock->magic != SPINLOCK_MAGIC)
		out_of_line_bug();
	if (!spin_is_locked(lock))
		out_of_line_bug();
#endif
	__asm__ __volatile__(
		spin_unlock_string
		:"=m" (lock->lock) : : "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
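/*
 * Illustrative example of that mix ("my_rwlock" is a placeholder and
 * the irqsave wrappers come from <linux/spinlock.h>): readers may take
 * the plain lock even though other readers run from interrupts,
 *
 *	read_lock(&my_rwlock);
 *	...
 *	read_unlock(&my_rwlock);
 *
 * as long as every writer disables interrupts while writing:
 *
 *	write_lock_irqsave(&my_rwlock, flags);
 *	...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 */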
typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#if SPINLOCK_DEBUG
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
/* the rwlock out-of-line helpers are in arch/x86_64/kernel/semaphore.S */
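/*
 * Rough sketch of the technique: the lock word starts out at
 * RW_LOCK_BIAS (a large constant from <asm/rwlock.h>).  read_lock
 * atomically subtracts 1 and only fails (goes negative) when a writer
 * has claimed the lock; read_unlock adds the 1 back.  write_lock
 * subtracts the whole bias, which leaves zero exactly when there were
 * no readers and no other writer, and write_unlock adds the bias back.
 */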

static inline void read_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		out_of_line_bug();
#endif
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void write_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		out_of_line_bug();
#endif
	__build_write_lock(rw, "__write_lock_failed");
}

#define read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

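/*
 * write_trylock: subtract the full write bias in a single atomic step.
 * If the count reaches zero there were no readers or writers and the
 * lock is ours.  Otherwise the subtraction is undone with atomic_add
 * so that readers do not keep seeing a phantom writer, and 0 is
 * returned.
 */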
static inline int write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#endif /* __ASM_SPINLOCK_H */