1/*
2 * BK Id: %F% %I% %G% %U% %#%
3 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/*
 * Collapse the CONFIG_DEBUG_SPINLOCK option into a 0/1 constant so it
 * can be used directly in #if expressions below.
 */
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG 1
#else
#define SPINLOCK_DEBUG 0
#endif

/*
 * Simple spin lock operations.
 */
/*
 * Lock word: 0 = free, non-zero = held (spin_lock stores 1 below).
 * The debug build additionally records who holds the lock.
 */
typedef struct {
	volatile unsigned long lock;
#if SPINLOCK_DEBUG
	volatile unsigned long owner_pc;	/* PC of the acquirer */
	volatile unsigned long owner_cpu;	/* CPU holding the lock */
#endif
} spinlock_t;
26
#ifdef __KERNEL__
/*
 * Extra initializers for the debug-only fields, so SPIN_LOCK_UNLOCKED
 * expands to a complete initializer in both configurations.
 */
#if SPINLOCK_DEBUG
#define SPINLOCK_DEBUG_INIT     , 0, 0
#else
#define SPINLOCK_DEBUG_INIT     /* */
#endif

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 SPINLOCK_DEBUG_INIT }

#define spin_lock_init(x) 	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x)	((x)->lock != 0)
/* Busy-wait (without acquiring) until the lock is observed free. */
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
39
#if SPINLOCK_DEBUG

/*
 * Debug variants are out-of-line C functions (defined elsewhere in the
 * arch code -- not visible in this header) that can check for
 * recursive locking, wrong-CPU unlocks, etc.
 */
extern void _spin_lock(spinlock_t *lock);
extern void _spin_unlock(spinlock_t *lock);
extern int spin_trylock(spinlock_t *lock);
/* Low-level primitive operating on the bare lock word. */
extern unsigned long __spin_trylock(volatile unsigned long *lock);

#define spin_lock(lp)			_spin_lock(lp)
#define spin_unlock(lp)			_spin_unlock(lp)
49
50#else /* ! SPINLOCK_DEBUG */
51
/*
 * Acquire the lock, spinning until it is free.
 *
 * Entry branches straight to 1: for an immediate lwarx/stwcx.
 * (load-reserve / store-conditional) attempt: lwarx reads the lock
 * word and takes a reservation, and stwcx. stores 1 only if the
 * reservation is still intact.  If the lock is seen held (or the
 * store-conditional fails), the loop at 2: polls with an ordinary
 * lwzx load -- avoiding holding a reservation while spinning -- until
 * the word reads 0, then falls back into the reserved sequence.
 * The trailing isync stops later instructions from being executed
 * before the lock is taken (acquire-side ordering).
 */
static inline void spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
	"b	1f			# spin_lock\n\
2:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	2b\n\
1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	2b\n\
	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&lock->lock), "r"(1)
	: "cr0", "memory");
}
71
/*
 * Release the lock.  eieio orders all stores done inside the critical
 * section before the plain store that clears the lock word (release
 * ordering); no atomic sequence is needed since only the owner writes
 * here.
 */
static inline void spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__("eieio		# spin_unlock": : :"memory");
	lock->lock = 0;
}
77
/*
 * One-shot attempt: non-zero on success, 0 if the lock was held.
 * NOTE(review): this passes the spinlock_t pointer itself to
 * test_and_set_bit(), relying on ->lock being the first (and, in the
 * non-debug case, only) member -- the debug path above instead takes
 * the spinlock_t * into a C function.  Confirm this aliasing is
 * intentional before touching the struct layout.
 */
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))

#endif
81
82/*
83 * Read-write spinlocks, allowing multiple readers
84 * but only one writer.
85 *
86 * NOTE! it is quite common to have readers in interrupts
87 * but no interrupt writers. For those circumstances we
88 * can "mix" irq-safe locks - any writer needs to get a
89 * irq-safe write-lock, but readers can get non-irqsafe
90 * read-locks.
91 */
/*
 * Lock word encoding (see the asm below): 0 = free, a positive value
 * is the count of active readers, -1 = write-locked.
 */
typedef struct {
	volatile unsigned long lock;
#if SPINLOCK_DEBUG
	volatile unsigned long owner_pc;	/* PC of the acquirer (debug) */
#endif
} rwlock_t;
98
/* Extra initializer for the debug-only owner_pc field. */
#if SPINLOCK_DEBUG
#define RWLOCK_DEBUG_INIT     , 0
#else
#define RWLOCK_DEBUG_INIT     /* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 RWLOCK_DEBUG_INIT }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

#if SPINLOCK_DEBUG

/*
 * Out-of-line debug implementations, defined elsewhere in the arch
 * code (not visible in this header).
 */
extern void _read_lock(rwlock_t *rw);
extern void _read_unlock(rwlock_t *rw);
extern void _write_lock(rwlock_t *rw);
extern void _write_unlock(rwlock_t *rw);

#define read_lock(rw)		_read_lock(rw)
#define write_lock(rw)		_write_lock(rw)
#define write_unlock(rw)	_write_unlock(rw)
#define read_unlock(rw)		_read_unlock(rw)
119
120#else /* ! SPINLOCK_DEBUG */
121
/*
 * Acquire for reading: atomically increment the lock word, but only
 * when it is not write-locked (negative).
 *
 * Entry branches to 2: for an immediate lwarx/addic./stwcx. attempt.
 * addic. sets cr0 from the incremented value: if it is <= 0 the old
 * value was negative (a writer holds the lock), so the loop at 1:
 * polls with a plain lwzx load until the word is non-negative before
 * retrying the reserved sequence.  isync gives acquire-side ordering.
 */
static __inline__ void read_lock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"b		2f		# read_lock\n\
1:	lwzx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	blt+		1b\n\
2:	lwarx		%0,0,%1\n\
	addic.		%0,%0,1\n\
	ble-		1b\n\
	stwcx.		%0,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}
141
/*
 * Drop a read hold: atomically decrement the reader count with a
 * lwarx/stwcx. loop (other readers may be updating it concurrently).
 * The leading eieio orders the critical section's stores before the
 * decrement becomes visible (release ordering).
 */
static __inline__ void read_unlock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"eieio				# read_unlock\n\
1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}
156
/*
 * Acquire for writing: the word must be exactly 0 (no readers, no
 * writer); success stores -1.
 *
 * Same shape as spin_lock: entry branches to 2: for an immediate
 * lwarx/stwcx. attempt, and the loop at 1: polls with a plain lwzx
 * load while the word is non-zero, avoiding a held reservation while
 * spinning.  isync gives acquire-side ordering.
 */
static __inline__ void write_lock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"b		2f		# write_lock\n\
1:	lwzx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne+		1b\n\
2:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	stwcx.		%2,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");
}
176
/*
 * Release the write lock.  eieio orders the critical section's stores
 * before the clearing store (release ordering); a plain store suffices
 * since the single writer owns the word (it holds -1 here).
 */
static __inline__ void write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__("eieio		# write_unlock": : :"memory");
	rw->lock = 0;
}
182
183#endif
184
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
187