/*
 * ARM semaphore implementation, taken from
 *
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Modified for ARM by Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/sched.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
48 */ 49 50void __up(struct semaphore *sem) 51{ 52 wake_up(&sem->wait); 53} 54 55static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED; 56 57void __down(struct semaphore * sem) 58{ 59 struct task_struct *tsk = current; 60 DECLARE_WAITQUEUE(wait, tsk); 61 tsk->state = TASK_UNINTERRUPTIBLE; 62 add_wait_queue_exclusive(&sem->wait, &wait); 63 64 spin_lock_irq(&semaphore_lock); 65 sem->sleepers++; 66 for (;;) { 67 int sleepers = sem->sleepers; 68 69 /* 70 * Add "everybody else" into it. They aren't 71 * playing, because we own the spinlock. 72 */ 73 if (!atomic_add_negative(sleepers - 1, &sem->count)) { 74 sem->sleepers = 0; 75 break; 76 } 77 sem->sleepers = 1; /* us - see -1 above */ 78 spin_unlock_irq(&semaphore_lock); 79 80 schedule(); 81 tsk->state = TASK_UNINTERRUPTIBLE; 82 spin_lock_irq(&semaphore_lock); 83 } 84 spin_unlock_irq(&semaphore_lock); 85 remove_wait_queue(&sem->wait, &wait); 86 tsk->state = TASK_RUNNING; 87 wake_up(&sem->wait); 88} 89 90int __down_interruptible(struct semaphore * sem) 91{ 92 int retval = 0; 93 struct task_struct *tsk = current; 94 DECLARE_WAITQUEUE(wait, tsk); 95 tsk->state = TASK_INTERRUPTIBLE; 96 add_wait_queue_exclusive(&sem->wait, &wait); 97 98 spin_lock_irq(&semaphore_lock); 99 sem->sleepers ++; 100 for (;;) { 101 int sleepers = sem->sleepers; 102 103 /* 104 * With signals pending, this turns into 105 * the trylock failure case - we won't be 106 * sleeping, and we* can't get the lock as 107 * it has contention. Just correct the count 108 * and exit. 109 */ 110 if (signal_pending(current)) { 111 retval = -EINTR; 112 sem->sleepers = 0; 113 atomic_add(sleepers, &sem->count); 114 break; 115 } 116 117 /* 118 * Add "everybody else" into it. They aren't 119 * playing, because we own the spinlock. The 120 * "-1" is because we're still hoping to get 121 * the lock. 
122 */ 123 if (!atomic_add_negative(sleepers - 1, &sem->count)) { 124 sem->sleepers = 0; 125 break; 126 } 127 sem->sleepers = 1; /* us - see -1 above */ 128 spin_unlock_irq(&semaphore_lock); 129 130 schedule(); 131 tsk->state = TASK_INTERRUPTIBLE; 132 spin_lock_irq(&semaphore_lock); 133 } 134 spin_unlock_irq(&semaphore_lock); 135 tsk->state = TASK_RUNNING; 136 remove_wait_queue(&sem->wait, &wait); 137 wake_up(&sem->wait); 138 return retval; 139} 140 141/* 142 * Trylock failed - make sure we correct for 143 * having decremented the count. 144 * 145 * We could have done the trylock with a 146 * single "cmpxchg" without failure cases, 147 * but then it wouldn't work on a 386. 148 */ 149int __down_trylock(struct semaphore * sem) 150{ 151 int sleepers; 152 unsigned long flags; 153 154 spin_lock_irqsave(&semaphore_lock, flags); 155 sleepers = sem->sleepers + 1; 156 sem->sleepers = 0; 157 158 /* 159 * Add "everybody else" and us into it. They aren't 160 * playing, because we own the spinlock. 161 */ 162 if (!atomic_add_negative(sleepers, &sem->count)) 163 wake_up(&sem->wait); 164 165 spin_unlock_irqrestore(&semaphore_lock, flags); 166 return 1; 167} 168 169/* 170 * The semaphore operations have a special calling sequence that 171 * allow us to do a simpler in-line version of them. These routines 172 * need to convert that sequence back into the C sequence when 173 * there is contention on the semaphore. 174 * 175 * ip contains the semaphore pointer on entry. Save the C-clobbered 176 * registers (r0 to r3 and lr), but not ip, as we use it as a return 177 * value in some cases.. 
178 */ 179#ifdef CONFIG_CPU_26 180asm(" .align 5 181 .globl __down_failed 182__down_failed: 183 stmfd sp!, {r0 - r3, lr} 184 mov r0, ip 185 bl __down 186 ldmfd sp!, {r0 - r3, pc}^ 187 188 .align 5 189 .globl __down_interruptible_failed 190__down_interruptible_failed: 191 stmfd sp!, {r0 - r3, lr} 192 mov r0, ip 193 bl __down_interruptible 194 mov ip, r0 195 ldmfd sp!, {r0 - r3, pc}^ 196 197 .align 5 198 .globl __down_trylock_failed 199__down_trylock_failed: 200 stmfd sp!, {r0 - r3, lr} 201 mov r0, ip 202 bl __down_trylock 203 mov ip, r0 204 ldmfd sp!, {r0 - r3, pc}^ 205 206 .align 5 207 .globl __up_wakeup 208__up_wakeup: 209 stmfd sp!, {r0 - r3, lr} 210 mov r0, ip 211 bl __up 212 ldmfd sp!, {r0 - r3, pc}^ 213 "); 214 215#else 216/* 32 bit version */ 217asm(" .align 5 218 .globl __down_failed 219__down_failed: 220 stmfd sp!, {r0 - r3, lr} 221 mov r0, ip 222 bl __down 223 ldmfd sp!, {r0 - r3, pc} 224 225 .align 5 226 .globl __down_interruptible_failed 227__down_interruptible_failed: 228 stmfd sp!, {r0 - r3, lr} 229 mov r0, ip 230 bl __down_interruptible 231 mov ip, r0 232 ldmfd sp!, {r0 - r3, pc} 233 234 .align 5 235 .globl __down_trylock_failed 236__down_trylock_failed: 237 stmfd sp!, {r0 - r3, lr} 238 mov r0, ip 239 bl __down_trylock 240 mov ip, r0 241 ldmfd sp!, {r0 - r3, pc} 242 243 .align 5 244 .globl __up_wakeup 245__up_wakeup: 246 stmfd sp!, {r0 - r3, lr} 247 mov r0, ip 248 bl __up 249 ldmfd sp!, {r0 - r3, pc} 250 "); 251 252#endif 253