1#ifndef _SPARC64_SEMAPHORE_H
2#define _SPARC64_SEMAPHORE_H
3
4/* These are actually reasonable on the V9.
5 *
6 * See asm-ppc/semaphore.h for implementation commentary,
7 * only sparc64 specific issues are commented here.
8 */
9#ifdef __KERNEL__
10
11#include <asm/atomic.h>
12#include <asm/system.h>
13#include <linux/wait.h>
14#include <linux/rwsem.h>
15
16struct semaphore {
17	atomic_t count;
18	wait_queue_head_t wait;
19};
20
21#define __SEMAPHORE_INITIALIZER(name, count) \
22	{ ATOMIC_INIT(count), \
23	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
24
25#define __MUTEX_INITIALIZER(name) \
26	__SEMAPHORE_INITIALIZER(name, 1)
27
28#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
29	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
30
31#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
32#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)
33
34static inline void sema_init (struct semaphore *sem, int val)
35{
36	atomic_set(&sem->count, val);
37	init_waitqueue_head(&sem->wait);
38}
39
40static inline void init_MUTEX (struct semaphore *sem)
41{
42	sema_init(sem, 1);
43}
44
45static inline void init_MUTEX_LOCKED (struct semaphore *sem)
46{
47	sema_init(sem, 0);
48}
49
50extern void __down(struct semaphore * sem);
51extern int  __down_interruptible(struct semaphore * sem);
52extern void __up(struct semaphore * sem);
53
54static __inline__ void down(struct semaphore * sem)
55{
56	/* This atomically does:
57	 * 	old_val = sem->count;
58	 *	new_val = sem->count - 1;
59	 *	sem->count = new_val;
60	 *	if (old_val < 1)
61	 *		__down(sem);
62	 *
63	 * The (old_val < 1) test is equivalent to
64	 * the more straightforward (new_val < 0),
65	 * but it is easier to test the former because
66	 * of how the CAS instruction works.
67	 */
68
69	__asm__ __volatile__("\n"
70"	! down sem(%0)\n"
71"1:	lduw	[%0], %%g5\n"
72"	sub	%%g5, 1, %%g7\n"
73"	cas	[%0], %%g5, %%g7\n"
74"	cmp	%%g5, %%g7\n"
75"	bne,pn	%%icc, 1b\n"
76"	 cmp	%%g7, 1\n"
77"	bl,pn	%%icc, 3f\n"
78"	 membar	#StoreLoad | #StoreStore\n"
79"2:\n"
80"	.subsection 2\n"
81"3:	mov	%0, %%g5\n"
82"	save	%%sp, -160, %%sp\n"
83"	mov	%%g1, %%l1\n"
84"	mov	%%g2, %%l2\n"
85"	mov	%%g3, %%l3\n"
86"	call	%1\n"
87"	 mov	%%g5, %%o0\n"
88"	mov	%%l1, %%g1\n"
89"	mov	%%l2, %%g2\n"
90"	ba,pt	%%xcc, 2b\n"
91"	 restore %%l3, %%g0, %%g3\n"
92"	.previous\n"
93	: : "r" (sem), "i" (__down)
94	: "g5", "g7", "memory", "cc");
95}
96
97static __inline__ int down_interruptible(struct semaphore *sem)
98{
99	int ret = 0;
100
101	/* This atomically does:
102	 * 	old_val = sem->count;
103	 *	new_val = sem->count - 1;
104	 *	sem->count = new_val;
105	 *	if (old_val < 1)
106	 *		ret = __down_interruptible(sem);
107	 *
108	 * The (old_val < 1) test is equivalent to
109	 * the more straightforward (new_val < 0),
110	 * but it is easier to test the former because
111	 * of how the CAS instruction works.
112	 */
113
114	__asm__ __volatile__("\n"
115"	! down_interruptible sem(%2) ret(%0)\n"
116"1:	lduw	[%2], %%g5\n"
117"	sub	%%g5, 1, %%g7\n"
118"	cas	[%2], %%g5, %%g7\n"
119"	cmp	%%g5, %%g7\n"
120"	bne,pn	%%icc, 1b\n"
121"	 cmp	%%g7, 1\n"
122"	bl,pn	%%icc, 3f\n"
123"	 membar	#StoreLoad | #StoreStore\n"
124"2:\n"
125"	.subsection 2\n"
126"3:	mov	%2, %%g5\n"
127"	save	%%sp, -160, %%sp\n"
128"	mov	%%g1, %%l1\n"
129"	mov	%%g2, %%l2\n"
130"	mov	%%g3, %%l3\n"
131"	call	%3\n"
132"	 mov	%%g5, %%o0\n"
133"	mov	%%l1, %%g1\n"
134"	mov	%%l2, %%g2\n"
135"	mov	%%l3, %%g3\n"
136"	ba,pt	%%xcc, 2b\n"
137"	 restore %%o0, %%g0, %0\n"
138"	.previous\n"
139	: "=r" (ret)
140	: "0" (ret), "r" (sem), "i" (__down_interruptible)
141	: "g5", "g7", "memory", "cc");
142	return ret;
143}
144
145static __inline__ int down_trylock(struct semaphore *sem)
146{
147	int ret;
148
149	/* This atomically does:
150	 * 	old_val = sem->count;
151	 *	new_val = sem->count - 1;
152	 *	if (old_val < 1) {
153	 *		ret = 1;
154	 *	} else {
155	 *		sem->count = new_val;
156	 *		ret = 0;
157	 *	}
158	 *
159	 * The (old_val < 1) test is equivalent to
160	 * the more straightforward (new_val < 0),
161	 * but it is easier to test the former because
162	 * of how the CAS instruction works.
163	 */
164
165	__asm__ __volatile__("\n"
166"	! down_trylock sem(%1) ret(%0)\n"
167"1:	lduw	[%1], %%g5\n"
168"	sub	%%g5, 1, %%g7\n"
169"	cmp	%%g5, 1\n"
170"	bl,pn	%%icc, 2f\n"
171"	 mov	1, %0\n"
172"	cas	[%1], %%g5, %%g7\n"
173"	cmp	%%g5, %%g7\n"
174"	bne,pn	%%icc, 1b\n"
175"	 mov	0, %0\n"
176"	membar	#StoreLoad | #StoreStore\n"
177"2:\n"
178	: "=&r" (ret)
179	: "r" (sem)
180	: "g5", "g7", "memory", "cc");
181
182	return ret;
183}
184
185static __inline__ void up(struct semaphore * sem)
186{
187	/* This atomically does:
188	 * 	old_val = sem->count;
189	 *	new_val = sem->count + 1;
190	 *	sem->count = new_val;
191	 *	if (old_val < 0)
192	 *		__up(sem);
193	 *
194	 * The (old_val < 0) test is equivalent to
195	 * the more straightforward (new_val <= 0),
196	 * but it is easier to test the former because
197	 * of how the CAS instruction works.
198	 */
199
200	__asm__ __volatile__("\n"
201"	! up sem(%0)\n"
202"	membar	#StoreLoad | #LoadLoad\n"
203"1:	lduw	[%0], %%g5\n"
204"	add	%%g5, 1, %%g7\n"
205"	cas	[%0], %%g5, %%g7\n"
206"	cmp	%%g5, %%g7\n"
207"	bne,pn	%%icc, 1b\n"
208"	 addcc	%%g7, 1, %%g0\n"
209"	ble,pn	%%icc, 3f\n"
210"	 membar	#StoreLoad | #StoreStore\n"
211"2:\n"
212"	.subsection 2\n"
213"3:	mov	%0, %%g5\n"
214"	save	%%sp, -160, %%sp\n"
215"	mov	%%g1, %%l1\n"
216"	mov	%%g2, %%l2\n"
217"	mov	%%g3, %%l3\n"
218"	call	%1\n"
219"	 mov	%%g5, %%o0\n"
220"	mov	%%l1, %%g1\n"
221"	mov	%%l2, %%g2\n"
222"	ba,pt	%%xcc, 2b\n"
223"	 restore %%l3, %%g0, %%g3\n"
224"	.previous\n"
225	: : "r" (sem), "i" (__up)
226	: "g5", "g7", "memory", "cc");
227}
228
229static inline int sem_getcount(struct semaphore *sem)
230{
231	return atomic_read(&sem->count);
232}
233
234#endif /* __KERNEL__ */
235
236#endif /* !(_SPARC64_SEMAPHORE_H) */
237