/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 * (C) Copyright 1999, 2001, 2002 Ralf Baechle
 * (C) Copyright 1999, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_SEMAPHORE_HELPER_H
#define _ASM_SEMAPHORE_HELPER_H

#include <linux/errno.h>

/*
 * These two _must_ execute atomically with respect to each other:
 * wake_one_more() publishes a wakeup that the waking_non_zero*()
 * helpers consume.
 */
static inline void wake_one_more(struct semaphore * sem)
{
	/* Record one more pending wakeup for waking_non_zero*() to pick up. */
	atomic_inc(&sem->waking);
}

static inline int waking_non_zero(struct semaphore *sem)
{
	int ret, tmp;

	/*
	 * ll/sc loop: if sem->waking > 0, atomically decrement it and
	 * return 1 (a wakeup was consumed); otherwise return 0 so the
	 * caller goes back to sleep.
	 */
	__asm__ __volatile__(
	"1:\tll\t%1, %2\t\t\t# waking_non_zero\n\t"
	"blez\t%1, 2f\n\t"
	"subu\t%0, %1, 1\n\t"
	"sc\t%0, %2\n\t"
	"beqz\t%0, 1b\n"
	"2:"
	: "=r" (ret), "=r" (tmp), "+m" (sem->waking)
	: "0" (0));

	return ret;
}
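
/*
 * For reference only: the ll/sc loop above behaves like the following
 * compare-and-swap loop.  The sketch uses C11 <stdatomic.h>, is kept
 * under #if 0 so it is never built, and documents the algorithm rather
 * than being kernel code.
 */
#if 0
#include <stdatomic.h>

static int waking_non_zero_sketch(_Atomic int *waking)
{
	int old = atomic_load(waking);

	while (old > 0) {
		/* Try to consume one wakeup: waking = old - 1. */
		if (atomic_compare_exchange_weak(waking, &old, old - 1))
			return 1;		/* got it */
		/* The failed CAS refreshed 'old'; retry. */
	}
	return 0;				/* nothing pending: caller sleeps */
}
#endif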

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible decrement
 * simultaneously and atomically with the sem->waking adjustment,
 * otherwise we can race with wake_one_more.
 *
 * This is accomplished by doing a 64-bit lld/scd on the two 32-bit words.
 *
 * Pseudocode:
 *
 * If(sem->waking > 0) {
 *	Decrement(sem->waking)
 *	Return(SUCCESS)
 * } else If(signal_pending(tsk)) {
 *	Increment(sem->count)
 *	Return(-EINTR)
 * } else {
 *	Return(SLEEP)
 * }
 */

static inline int
waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
	long ret, tmp;

	/*
	 * The lld/scd pair updates both 32-bit counters in one atomic
	 * 64-bit operation.  The "sll $1, %1, 0" extracts (and sign
	 * extends) the low 32 bits, i.e. sem->waking; the dli/daddu of
	 * 0x0000000100000000 increments the high 32 bits, i.e. sem->count.
	 */
	__asm__ __volatile__(
	".set\tpush\t\t\t# waking_non_zero_interruptible\n\t"
	".set\tnoat\n\t"
	"0:\tlld\t%1, %2\n\t"
	"li\t%0, 0\n\t"
	"sll\t$1, %1, 0\n\t"
	"blez\t$1, 1f\n\t"
	"daddiu\t%1, %1, -1\n\t"
	"li\t%0, 1\n\t"
	"b\t2f\n\t"
	"1:\tbeqz\t%3, 2f\n\t"
	"li\t%0, %4\n\t"
	"dli\t$1, 0x0000000100000000\n\t"
	"daddu\t%1, %1, $1\n\t"
	"2:\tscd\t%1, %2\n\t"
	"beqz\t%1, 0b\n\t"
	".set\tpop"
	: "=&r" (ret), "=&r" (tmp), "=m" (*sem)
	: "r" (signal_pending(tsk)), "i" (-EINTR));

	return ret;
}
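
/*
 * For reference only: a C rendering of the lld/scd sequence above.  The
 * 64-bit load-linked/store-conditional covers both 32-bit counters at
 * once; the asm relies on sem->waking sitting in the low half of the
 * doubleword and sem->count in the high half (the matching struct
 * semaphore is expected to order its members per endianness so that
 * this holds).  The sketch uses C11 atomics, assumes a little-endian
 * view for the hypothetical sem_words layout, and is kept under #if 0
 * as documentation only.
 */
#if 0
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct sem_words {
	int32_t waking;		/* low 32 bits on a little-endian host */
	int32_t count;		/* high 32 bits on a little-endian host */
};

static int waking_non_zero_interruptible_sketch(_Atomic uint64_t *dword,
						int signal_pending)
{
	uint64_t old = atomic_load(dword);

	for (;;) {
		struct sem_words w;
		uint64_t upd;
		int ret;

		memcpy(&w, &old, sizeof(w));
		if (w.waking > 0) {
			w.waking--;	/* consume a wakeup */
			ret = 1;	/* got the semaphore */
		} else if (signal_pending) {
			w.count++;	/* undo down_interruptible's decrement */
			ret = -EINTR;
		} else {
			return 0;	/* nothing to do: go (back) to sleep */
		}

		memcpy(&upd, &w, sizeof(upd));
		if (atomic_compare_exchange_weak(dword, &old, upd))
			return ret;
		/* Raced with another CPU; 'old' was refreshed, retry. */
	}
}
#endif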

/*
 * waking_non_zero_trylock is unused.  We do everything in
 * down_trylock and let non-ll/sc hosts bounce around.
 */

static inline int waking_non_zero_trylock(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	return 0;
}
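
/*
 * For reference only: roughly how the helpers above are consumed by the
 * sleeping path of the matching semaphore implementation.  This is a
 * simplified sketch of the usual 2.4-era __down_interruptible() pattern,
 * not the exact kernel code, and it stays under #if 0.
 */
#if 0
static inline int __down_interruptible_sketch(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int ret = 0;

	add_wait_queue_exclusive(&sem->wait, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		ret = waking_non_zero_interruptible(sem, tsk);
		if (ret) {
			if (ret == 1)
				ret = 0;	/* 1 means we got the semaphore */
			break;			/* otherwise ret is -EINTR */
		}
		schedule();			/* 0 means keep sleeping */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&sem->wait, &wait);
	return ret;
}
#endif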

#endif /* _ASM_SEMAPHORE_HELPER_H */