/*
 * AVR32 semaphore implementation.
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/i386/kernel/semaphore.c
 *  Copyright (C) 1999 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>

#include <asm/semaphore.h>
#include <asm/atomic.h>

/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable counts the processes that had to go to sleep
 * waiting for it.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * The "sleepers" count and the ordering of the contention
 * routines are protected by the spinlock in the semaphore's
 * waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
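
/*
 * For illustration only -- a rough sketch of the shape of those
 * inline fast paths (the real versions live in <asm/semaphore.h>
 * and may differ in detail):
 *
 *	down(sem):
 *		if (atomic_sub_return(1, &sem->count) < 0)
 *			__down(sem);		-- contended: go to sleep
 *
 *	up(sem):
 *		if (atomic_add_return(1, &sem->count) <= 0)
 *			__up(sem);		-- a sleeper exists: wake it
 */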

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative we must
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
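
/*
 * Worked example (illustrative, not from the original source):
 * sem->count starts at 1.  Task A calls down(): count 1 -> 0,
 * fast path, A now holds the semaphore.  Task B calls down():
 * count 0 -> -1, so B enters __down(), bumps sleepers to 1, adds
 * back (sleepers - 1) == 0, still sees a negative count and
 * sleeps.  When A calls up(): count -1 -> 0, the old value was
 * negative, so __up() wakes B.  B loops, adds (sleepers - 1) == 0
 * to count 0, sees a non-negative result, clears sleepers and
 * owns the semaphore.
 */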

void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);

void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
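		/*
		 * Concretely: every waiter, us included, already took
		 * one off "count" in the down() fast path.  Adding back
		 * (sleepers - 1) collapses all of those debits into a
		 * single -1, so a non-negative result means an up() has
		 * released the semaphore and we can take it.
		 */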
		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}
EXPORT_SYMBOL(__down);

int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into the trylock
		 * failure case - we won't be sleeping, and we can't
		 * get the lock as it has contention. Just correct the
		 * count and exit.
		 */
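		/*
		 * Adding back the local "sleepers" snapshot undoes our
		 * own -1 from the fast path plus, if others are asleep,
		 * the single -1 that represents the whole sleeping
		 * group; the wake_up_locked() at the end of this
		 * function then lets one of them redo the bookkeeping.
		 */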
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	tsk->state = TASK_RUNNING;
	return retval;
}
EXPORT_SYMBOL(__down_interruptible);
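
/*
 * Hypothetical usage sketch (not part of this file): callers never
 * invoke __down()/__up() directly, they go through the inline
 * wrappers from <asm/semaphore.h>, e.g.
 *
 *	static DECLARE_MUTEX(foo_sem);
 *
 *	if (down_interruptible(&foo_sem))
 *		return -ERESTARTSYS;
 *	...critical section...
 *	up(&foo_sem);
 */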