/*
 * BK Id: %F% %I% %G% %U% %#%
 */
/*
 * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
 * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#ifndef _PPC_RWSEM_H
#define _PPC_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
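	/*
	 * The low 16 bits of count (RWSEM_ACTIVE_MASK) hold the number of
	 * active lockers, each contributing RWSEM_ACTIVE_BIAS; a writer
	 * also contributes RWSEM_WAITING_BIAS, so count goes negative
	 * whenever a writer holds or is queued for the semaphore.
	 */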
	signed long		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if RWSEM_DEBUG
	int			debug;
#endif
};

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
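
/*
 * Typical usage, through the generic wrappers in <linux/rwsem.h> which
 * call the __down_*()/__up_*() routines below (a minimal sketch; my_sem
 * and the surrounding code are illustrative only):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);
 *	... read-side critical section, may run concurrently ...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);
 *	... write-side critical section, exclusive ...
 *	up_write(&my_sem);
 */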

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
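	/*
	 * Fast path: add one reader.  A positive result means no writer
	 * holds or is waiting for the lock; otherwise fall back to the
	 * slow path in lib/rwsem.c.
	 */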
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

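	/*
	 * Retry the cmpxchg as long as the count stays non-negative (no
	 * writer present); give up as soon as a writer bias appears.
	 */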
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

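	/*
	 * Fast path: the result equals RWSEM_ACTIVE_WRITE_BIAS only if
	 * count was zero beforehand, i.e. nobody held or was waiting for
	 * the lock; otherwise fall back to the slow path in lib/rwsem.c.
	 */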
	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

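	/*
	 * The cmpxchg succeeds only if the semaphore was completely
	 * unlocked (count == RWSEM_UNLOCKED_VALUE).
	 */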
	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

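	/*
	 * Drop our active bias.  If the count shows queued waiters and no
	 * remaining active lockers, wake the waiters via the slow path.
	 */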
	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
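	/*
	 * Order the critical section before the release, then drop the
	 * write bias; a negative result means waiters are still queued,
	 * so wake them.
	 */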
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
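/*
 * Adjust the count without looking at the result; the slow path in
 * lib/rwsem.c uses this when it does not need the new value.
 */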
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * implement exchange and add functionality
 */
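/*
 * As above, but return the updated count so the slow path can tell
 * whether any waiters or active lockers remain.
 */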
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _PPC_RWSEM_H */