/*
 * ARM semaphore implementation, taken from
 *
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 2003 Ian Molton (ARM26 mods)
 *
 * Modified for ARM by Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */

/*
 * Slow path of up(): only reached (via __up_wakeup below) when the
 * count was negative before the increment, i.e. somebody may be
 * sleeping on the wait queue - wake one of them up.
 */
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}

/* Serializes "sleepers" updates and the contention routines below. */
static DEFINE_SPINLOCK(semaphore_lock);

/*
 * Slow path of down(): the fast-path decrement took "count" negative,
 * so we must sleep until the semaphore is released.
 *
 * We queue ourselves (exclusively) on the wait queue and set our task
 * state BEFORE taking the spinlock and touching the counters, so that
 * a concurrent __up() cannot wake the queue while we are not yet on
 * it - this is how wakeup events cannot be lost (see "Logic" above).
 */
void __sched __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.  The
		 * "-1" is because we're still hoping to get
		 * the lock: if the result is non-negative, the
		 * semaphore is ours.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		/* Re-arm the task state before retrying, so a wakeup
		 * between the check and the next schedule() is not lost. */
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	/*
	 * We were an exclusive waiter, so our wakeup consumed the event;
	 * kick the queue again so the next sleeper re-evaluates the count.
	 */
	wake_up(&sem->wait);
}

/*
 * Interruptible counterpart of __down(): same sleeper bookkeeping,
 * but a pending signal aborts the wait.
 *
 * Returns 0 once the semaphore has been acquired, or -EINTR if the
 * wait was interrupted by a signal (in which case the fast-path
 * decrement is undone below).
 */
int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers ++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			/* Undo our fast-path decrement plus the other
			 * sleepers' contribution we folded in earlier. */
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		/* Re-arm before retrying - see the note in __down(). */
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	/* Pass the (possibly unconsumed) wakeup on to the next waiter. */
	wake_up(&sem->wait);
	return retval;
}

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 * (Comment inherited from the i386 version this file is taken from.)
 *
 * Always returns 1 (= trylock failed): the inline fast path already
 * lost the race, and this slow path only repairs the counters.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.  If that makes
	 * the count non-negative, a sleeper can now run.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}

/*
 * The semaphore operations have a special calling sequence that
 * allow us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * ip contains the semaphore pointer on entry. Save the C-clobbered
 * registers (r0 to r3 and lr), but not ip, as we use it as a return
 * value in some cases..
 */
/*
 * Assembly trampolines between the <asm/semaphore.h> inline fast paths
 * and the C slow paths above.  Each stub:
 *   - saves the C-clobbered registers (r0-r3, lr),
 *   - moves the semaphore pointer from ip into r0 (first C argument),
 *   - calls the C routine,
 *   - for the routines that return a value, copies r0 back into ip so
 *     the caller's registers are otherwise undisturbed.
 * The trailing '^' on the "ldmfd ... pc}^" return restores the PSR
 * flags along with pc - NOTE(review): this is the 26-bit ARM form,
 * where the PSR lives in the top bits of r15; specific to ARM26.
 */
asm("	.section .sched.text , #alloc, #execinstr	\n\
	.align	5				\n\
	.globl	__down_failed			\n\
__down_failed:					\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down				\n\
	ldmfd	sp!, {r0 - r3, pc}^		\n\
						\n\
	.align	5				\n\
	.globl	__down_interruptible_failed	\n\
__down_interruptible_failed:			\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_interruptible		\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r3, pc}^		\n\
						\n\
	.align	5				\n\
	.globl	__down_trylock_failed		\n\
__down_trylock_failed:				\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_trylock			\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r3, pc}^		\n\
						\n\
	.align	5				\n\
	.globl	__up_wakeup			\n\
__up_wakeup:					\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__up				\n\
	ldmfd	sp!, {r0 - r3, pc}^		\n\
	");

/*
 * Exported for modules: the inline semaphore operations in modules
 * branch to these stubs on contention.
 */
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_interruptible_failed);
EXPORT_SYMBOL(__down_trylock_failed);
EXPORT_SYMBOL(__up_wakeup);