1/*
2 * SMP- and interrupt-safe semaphores helper functions.
3 *
4 * Copyright (C) 1996 Linus Torvalds
5 * Copyright (C) 1999 Andrea Arcangeli
6 * Copyright (C) 1999 Ralf Baechle
7 * Copyright (C) 1999 Silicon Graphics, Inc.
8 * Copyright (C) 2000 MIPS Technologies, Inc.
9 */
10#ifndef _ASM_SEMAPHORE_HELPER_H
11#define _ASM_SEMAPHORE_HELPER_H
12
13#include <linux/config.h>
14
/*
 * Plain, non-atomic accessors for an atomic_t counter.  These are NOT
 * SMP/interrupt safe by themselves; the fallback (non-ll/sc) paths
 * below only use them with interrupts disabled via local_irq_save().
 */
#define sem_read(a) ((a)->counter)
#define sem_inc(a) (((a)->counter)++)
#define sem_dec(a) (((a)->counter)--)
/*
 * wake_one_more() and waking_non_zero() _must_ execute atomically
 * with respect to each other.
 */
/*
 * Record one pending wakeup: bump sem->waking so that a waiter polling
 * in waking_non_zero*() can consume it.  atomic_inc() makes this safe
 * against a concurrent waking_non_zero() on both the ll/sc path and
 * the interrupts-off fallback path.
 */
static inline void wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}
25
26#ifdef CONFIG_CPU_HAS_LLSC
27
/*
 * Atomically do: if (sem->waking > 0) { sem->waking--; return 1; }
 *                else return 0;
 *
 * On success the returned 1 is literally the sc success flag left in
 * %0.  On the no-wakeup path ret keeps its initial value: the "0"(0)
 * input constraint ties operand 0 to a zero before the asm runs.
 *
 * NOTE(review): this relies on the assembler's default reorder mode to
 * fill the blez delay slot (no ".set noreorder" here) — the subu must
 * not end up in the delay slot or the early-exit path would clobber
 * ret.  TODO confirm against the toolchains this is built with.
 */
static inline int waking_non_zero(struct semaphore *sem)
{
	int ret, tmp;

	__asm__ __volatile__(
	"1:\tll\t%1, %2\t\t\t# waking_non_zero\n\t"	/* tmp = sem->waking (load-linked) */
	"blez\t%1, 2f\n\t"				/* none pending -> ret stays 0 */
	"subu\t%0, %1, 1\n\t"				/* consume one wakeup */
	"sc\t%0, %2\n\t"				/* store-conditional; ret = 1 on success */
	"beqz\t%0, 1b\n"				/* lost the reservation -> retry */
	"2:"
	: "=r" (ret), "=r" (tmp), "+m" (sem->waking)
	: "0"(0));

	return ret;
}
44
45#else /* !CONFIG_CPU_HAS_LLSC */
46
/*
 * It doesn't make sense, IMHO, to endlessly turn interrupts off and on again.
 * Do it once and that's it. ll/sc *has* its advantages. HK
 */
51
52static inline int waking_non_zero(struct semaphore *sem)
53{
54	unsigned long flags;
55	int ret = 0;
56
57	local_irq_save(flags);
58	if (sem_read(&sem->waking) > 0) {
59		sem_dec(&sem->waking);
60		ret = 1;
61	}
62	local_irq_restore(flags);
63	return ret;
64}
65#endif /* !CONFIG_CPU_HAS_LLSC */
66
67#ifdef CONFIG_CPU_HAS_LLDSCD
68
69/*
70 * waking_non_zero_interruptible:
71 *	1	got the lock
72 *	0	go to sleep
73 *	-EINTR	interrupted
74 *
75 * We must undo the sem->count down_interruptible decrement
76 * simultaneously and atomically with the sem->waking adjustment,
77 * otherwise we can race with wake_one_more.
78 *
79 * This is accomplished by doing a 64-bit lld/scd on the 2 32-bit words.
80 *
81 * This is crazy.  Normally it's strictly forbidden to use 64-bit operations
82 * in the 32-bit MIPS kernel.  In this case it's however ok because if an
83 * interrupt has destroyed the upper half of registers sc will fail.
84 * Note also that this will not work for MIPS32 CPUs!
85 *
86 * Pseudocode:
87 *
88 * If(sem->waking > 0) {
89 *	Decrement(sem->waking)
90 *	Return(SUCCESS)
91 * } else If(signal_pending(tsk)) {
92 *	Increment(sem->count)
93 *	Return(-EINTR)
94 * } else {
95 *	Return(SLEEP)
96 * }
97 */
98
/*
 * See the pseudocode above: one lld/scd covers both 32-bit halves of
 * the semaphore (count and waking), so testing waking and rolling back
 * count happen atomically — an interrupt between lld and scd trashes
 * the upper register halves and forces the scd to fail and retry.
 *
 * Returns 1 (got it), 0 (sleep) or -EINTR (signal pending; count was
 * re-incremented to undo down_interruptible's decrement).
 *
 * NOTE(review): the sll extracts the sign-extended low 32 bits of the
 * 64-bit load and the dli adds 1 to the high 32 bits — which half is
 * `waking` and which is `count` depends on the struct layout and CPU
 * endianness; presumably low = waking, high = count here.  TODO
 * confirm against struct semaphore and the supported endiannesses.
 * $1 ($at) is used as scratch, hence ".set noat".
 */
static inline int
waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
	long ret, tmp;

	__asm__ __volatile__(
	".set\tpush\n\t"
	".set\tmips3\n\t"
	".set\tnoat\n"
	"0:\tlld\t%1, %2\n\t"			/* tmp = {count,waking} as one 64-bit value */
	"li\t%0, 0\n\t"				/* default: sleep */
	"sll\t$1, %1, 0\n\t"			/* $at = sign-extended low word */
	"blez\t$1, 1f\n\t"			/* no wakeup pending? */
	"daddiu\t%1, %1, -1\n\t"		/* consume one wakeup (low word) */
	"li\t%0, 1\n\t"				/* got the semaphore */
	"b\t2f\n"
	"1:\tbeqz\t%3, 2f\n\t"			/* no signal pending -> sleep */
	"li\t%0, %4\n\t"			/* ret = -EINTR */
	"dli\t$1, 0x0000000100000000\n\t"	/* +1 in the high word ... */
	"daddu\t%1, %1, $1\n"			/* ... undoes the count decrement */
	"2:\tscd\t%1, %2\n\t"			/* commit both words atomically */
	"beqz\t%1, 0b\n\t"			/* scd failed -> start over */
	".set\tpop"
	: "=&r" (ret), "=&r" (tmp), "=m" (*sem)
	: "r" (signal_pending(tsk)), "i" (-EINTR));

	return ret;
}
127
128/*
129 * waking_non_zero_trylock is unused.  we do everything in
130 * down_trylock and let non-ll/sc hosts bounce around.
131 */
132
/*
 * Stub for ll/sc hosts: down_trylock() handles the whole operation
 * itself, so this always reports "no wakeup taken" (0).  Only the
 * optional magic check remains for debugging builds.
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	return 0;
}
141
142#else /* !CONFIG_CPU_HAS_LLDSCD */
143
144static inline int waking_non_zero_interruptible(struct semaphore *sem,
145						struct task_struct *tsk)
146{
147	int ret = 0;
148	unsigned long flags;
149
150	local_irq_save(flags);
151	if (sem_read(&sem->waking) > 0) {
152		sem_dec(&sem->waking);
153		ret = 1;
154	} else if (signal_pending(tsk)) {
155		sem_inc(&sem->count);
156		ret = -EINTR;
157	}
158	local_irq_restore(flags);
159	return ret;
160}
161
162static inline int waking_non_zero_trylock(struct semaphore *sem)
163{
164        int ret = 1;
165	unsigned long flags;
166
167	local_irq_save(flags);
168	if (sem_read(&sem->waking) <= 0)
169		sem_inc(&sem->count);
170	else {
171		sem_dec(&sem->waking);
172		ret = 0;
173	}
174	local_irq_restore(flags);
175
176	return ret;
177}
178
179#endif /* !CONFIG_CPU_HAS_LLDSCD */
180
181#endif /* _ASM_SEMAPHORE_HELPER_H */
182