#ifndef _PPC64_SEMAPHORE_H
#define _PPC64_SEMAPHORE_H

/*
 * Remove spinlock-based RW semaphores; RW semaphore definitions are
 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
 * Rework semaphores to use atomic_dec_if_positive.
 * -- Paul Mackerras (paulus@samba.org)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};
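
/*
 * Illustrative summary of the count values produced by the
 * atomic_dec_return()-based fast paths below: a semaphore initialised
 * to 1 has count == 1 when free, count == 0 when held, and count < 0
 * when held with tasks possibly sleeping on `wait'.
 */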

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name, count) \
	{ ATOMIC_INIT(count), \
	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	  __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name, 1)

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)
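
/*
 * Typical usage (illustrative; `foo_sem' is a placeholder name and not
 * part of this header):
 *
 *	static DECLARE_MUTEX(foo_sem);
 *
 *	down(&foo_sem);
 *	... critical section ...
 *	up(&foo_sem);
 *
 * A semaphore embedded in another structure is initialised at run time
 * with sema_init(sem, count), or with init_MUTEX()/init_MUTEX_LOCKED()
 * for the common counts of 1 and 0.
 */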

static inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

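/*
 * Slow-path helpers, implemented out of line: they put the caller to
 * sleep or wake up a sleeper when the inline fast paths below detect
 * contention.
 */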
extern void __down(struct semaphore * sem);
extern int  __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

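/*
 * down() decrements the count; if the result is negative the semaphore
 * was already held, so the caller sleeps uninterruptibly in __down().
 */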
static inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	/*
	 * Try to get the semaphore, take the slow path if we fail.
	 */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
	smp_wmb();
}

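/*
 * Interruptible variant of down(): returns 0 once the semaphore has
 * been acquired, or a non-zero error (conventionally -EINTR) if the
 * sleep was broken by a signal; the exact value comes from
 * __down_interruptible().
 */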
static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	smp_wmb();
	return ret;
}

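/*
 * Non-blocking attempt to take the semaphore: returns 0 on success and
 * non-zero if it is already held.  atomic_dec_if_positive() only
 * performs the decrement when the result stays non-negative, so a
 * failed trylock leaves the count untouched.
 */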
static inline int down_trylock(struct semaphore * sem)
{
	int ret;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	ret = atomic_dec_if_positive(&sem->count) < 0;
	smp_wmb();
	return ret;
}

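/*
 * up() releases the semaphore.  If the incremented count is still <= 0
 * there may be sleepers, so __up() is called to wake one of them; the
 * smp_wmb() makes the critical section's stores visible before the
 * count is raised.
 */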
static inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	smp_wmb();
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

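/*
 * Snapshot of the current count: positive means the semaphore is
 * available, zero or negative means it is held (see the note on
 * negative values in struct semaphore above).
 */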
static inline int sem_getcount(struct semaphore *sem)
{
	return atomic_read(&sem->count);
}

#endif /* __KERNEL__ */

#endif /* !(_PPC64_SEMAPHORE_H) */