1#ifndef _ASM_M32R_SEMAPHORE_H
2#define _ASM_M32R_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * Copyright (C) 1996  Linus Torvalds
12 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
13 */
14
15#include <linux/wait.h>
16#include <linux/rwsem.h>
17#include <asm/assembler.h>
18#include <asm/system.h>
19#include <asm/atomic.h>
20
/*
 * Counting semaphore.  "count" is the number of available resources;
 * the fast paths below decrement/increment it atomically and only fall
 * into the out-of-line slow paths on contention (count going negative).
 */
struct semaphore {
	atomic_t count;		/* > 0: free slots; <= 0: taken / waiters queued */
	int sleepers;		/* slow-path bookkeeping — managed by __down*() in semaphore.c (not visible here) */
	wait_queue_head_t wait;	/* tasks blocked in down()/down_interruptible() */
};
26
/*
 * Static initializer for a semaphore named "name" holding "n" initial
 * resources, with no sleepers and an empty wait queue.
 */
#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

/* Define and statically initialize a semaphore object in one statement. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* Mutex-style semaphores: initial count 1 (unlocked) or 0 (locked). */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
39
40static inline void sema_init (struct semaphore *sem, int val)
41{
42/*
43 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
44 *
45 * i'd rather use the more flexible initialization above, but sadly
46 * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
47 */
48	atomic_set(&sem->count, val);
49	sem->sleepers = 0;
50	init_waitqueue_head(&sem->wait);
51}
52
/* Initialize "sem" for use as an unlocked mutex (count == 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
57
/* Initialize "sem" for use as a locked mutex (count == 0). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
62
/*
 * Out-of-line slow-path entry points (presumably defined in the arch's
 * semaphore.c / assembly — not visible in this header).  The *_failed
 * variants use a special register calling convention and are not meant
 * to be called directly from C.
 */
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
asmlinkage int  __down_failed_trylock(void  /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

/* C slow paths, invoked by the inline fast paths below on contention. */
asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
72
73/*
74 * Atomically decrement the semaphore's count.  If it goes negative,
75 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
76 */
77static inline void down(struct semaphore * sem)
78{
79	might_sleep();
80	if (unlikely(atomic_dec_return(&sem->count) < 0))
81		__down(sem);
82}
83
84/*
85 * Interruptible try to acquire a semaphore.  If we obtained
86 * it, return zero.  If we were interrupted, returns -EINTR
87 */
88static inline int down_interruptible(struct semaphore * sem)
89{
90	int result = 0;
91
92	might_sleep();
93	if (unlikely(atomic_dec_return(&sem->count) < 0))
94		result = __down_interruptible(sem);
95
96	return result;
97}
98
99/*
100 * Non-blockingly attempt to down() a semaphore.
101 * Returns zero if we acquired it
102 */
/*
 * Non-blockingly attempt to down() a semaphore.
 * Returns zero if we acquired it
 */
static inline int down_trylock(struct semaphore * sem)
{
	unsigned long flags;
	long count;
	int result = 0;

	/* With local interrupts off, the LOCK/UNLOCK load-modify-store
	 * pair below performs an atomic decrement of sem->count and
	 * leaves the new value in "count". */
	local_irq_save(flags);
	__asm__ __volatile__ (
		"# down_trylock			\n\t"
		DCACHE_CLEAR("%0", "r4", "%1")
		M32R_LOCK" %0, @%1;		\n\t"
		"addi	%0, #-1;		\n\t"
		M32R_UNLOCK" %0, @%1;		\n\t"
		: "=&r" (count)
		: "r" (&sem->count)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		/* TS1 errata workaround: DCACHE_CLEAR uses r4 as scratch. */
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	/* Count went negative: we failed to acquire; the slow path
	 * presumably restores the count and returns nonzero — confirm
	 * against __down_trylock in semaphore.c. */
	if (unlikely(count < 0))
		result = __down_trylock(sem);

	return result;
}
130
131/*
132 * Note! This is subtle. We jump to wake people up only if
133 * the semaphore was negative (== somebody was waiting on it).
134 * The default case (no contention) will result in NO
135 * jumps for both down() and up().
136 */
137static inline void up(struct semaphore * sem)
138{
139	if (unlikely(atomic_inc_return(&sem->count) <= 0))
140		__up(sem);
141}
142
143#endif  /* __KERNEL__ */
144
145#endif  /* _ASM_M32R_SEMAPHORE_H */
146