1/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ 2 * 3 * Written by David Howells (dhowells@redhat.com). 4 * 5 * Derived from asm-i386/semaphore.h 6 * 7 * 8 * The MSW of the count is the negated number of active writers and waiting 9 * lockers, and the LSW is the total number of active locks 10 * 11 * The lock count is initialized to 0 (no active and no waiting lockers). 12 * 13 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an 14 * uncontended lock. This can be determined because XADD returns the old value. 15 * Readers increment by 1 and see a positive value when uncontended, negative 16 * if there are writers (and maybe) readers waiting (in which case it goes to 17 * sleep). 18 * 19 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can 20 * be extended to 65534 by manually checking the whole MSW rather than relying 21 * on the S flag. 22 * 23 * The value of ACTIVE_BIAS supports up to 65535 active processes. 24 * 25 * This should be totally fair - if anything is waiting, a process that wants a 26 * lock will go to the back of the queue. When the currently active lock is 27 * released, if there's a writer at the front of the queue, then that and only 28 * that will be woken up; if there's a bunch of consequtive readers at the 29 * front, then they'll all be woken up, but no other readers will be. 30 */ 31 32#ifndef _I386_RWSEM_H 33#define _I386_RWSEM_H 34 35#ifndef _LINUX_RWSEM_H 36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" 37#endif 38 39#ifdef __KERNEL__ 40 41#include <linux/list.h> 42#include <linux/spinlock.h> 43#include <linux/lockdep.h> 44 45struct rwsem_waiter; 46 47extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); 48extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); 49extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *)); 50extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem)); 51 52/* 53 * the semaphore definition 54 */ 55struct rw_semaphore { 56 signed long count; 57#define RWSEM_UNLOCKED_VALUE 0x00000000 58#define RWSEM_ACTIVE_BIAS 0x00000001 59#define RWSEM_ACTIVE_MASK 0x0000ffff 60#define RWSEM_WAITING_BIAS (-0x00010000) 61#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 62#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 63 spinlock_t wait_lock; 64 struct list_head wait_list; 65#ifdef CONFIG_DEBUG_LOCK_ALLOC 66 struct lockdep_map dep_map; 67#endif 68}; 69 70#ifdef CONFIG_DEBUG_LOCK_ALLOC 71# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } 72#else 73# define __RWSEM_DEP_MAP_INIT(lockname) 74#endif 75 76 77#define __RWSEM_INITIALIZER(name) \ 78{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ 79 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } 80 81#define DECLARE_RWSEM(name) \ 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 83 84extern void __init_rwsem(struct rw_semaphore *sem, const char *name, 85 struct lock_class_key *key); 86 87#define init_rwsem(sem) \ 88do { \ 89 static struct lock_class_key __key; \ 90 \ 91 __init_rwsem((sem), #sem, &__key); \ 92} while (0) 93 94/* 95 * lock for reading 96 */ 97static inline void __down_read(struct rw_semaphore *sem) 98{ 99 __asm__ __volatile__( 100 "# beginning down_read\n\t" 101LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ 102 " jns 1f\n" 103 " call call_rwsem_down_read_failed\n" 104 "1:\n\t" 105 "# ending down_read\n\t" 106 : "+m" (sem->count) 107 : "a" (sem) 108 : "memory", "cc"); 109} 110 111/* 112 * trylock for reading -- returns 1 if successful, 0 if contention 113 */ 114static inline int __down_read_trylock(struct rw_semaphore *sem) 115{ 116 __s32 result, tmp; 117 __asm__ __volatile__( 118 "# beginning __down_read_trylock\n\t" 119 " movl %0,%1\n\t" 120 "1:\n\t" 121 " movl %1,%2\n\t" 122 " addl %3,%2\n\t" 123 " jle 2f\n\t" 124LOCK_PREFIX " cmpxchgl %2,%0\n\t" 125 " jnz 1b\n\t" 126 "2:\n\t" 127 "# ending __down_read_trylock\n\t" 128 : "+m" (sem->count), "=&a" (result), "=&r" (tmp) 129 : "i" (RWSEM_ACTIVE_READ_BIAS) 130 : "memory", "cc"); 131 return result>=0 ? 1 : 0; 132} 133 134/* 135 * lock for writing 136 */ 137static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) 138{ 139 int tmp; 140 141 tmp = RWSEM_ACTIVE_WRITE_BIAS; 142 __asm__ __volatile__( 143 "# beginning down_write\n\t" 144LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ 145 " testl %%edx,%%edx\n\t" /* was the count 0 before? */ 146 " jz 1f\n" 147 " call call_rwsem_down_write_failed\n" 148 "1:\n" 149 "# ending down_write" 150 : "+m" (sem->count), "=d" (tmp) 151 : "a" (sem), "1" (tmp) 152 : "memory", "cc"); 153} 154 155static inline void __down_write(struct rw_semaphore *sem) 156{ 157 __down_write_nested(sem, 0); 158} 159 160/* 161 * trylock for writing -- returns 1 if successful, 0 if contention 162 */ 163static inline int __down_write_trylock(struct rw_semaphore *sem) 164{ 165 signed long ret = cmpxchg(&sem->count, 166 RWSEM_UNLOCKED_VALUE, 167 RWSEM_ACTIVE_WRITE_BIAS); 168 if (ret == RWSEM_UNLOCKED_VALUE) 169 return 1; 170 return 0; 171} 172 173/* 174 * unlock after reading 175 */ 176static inline void __up_read(struct rw_semaphore *sem) 177{ 178 __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; 179 __asm__ __volatile__( 180 "# beginning __up_read\n\t" 181LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ 182 " jns 1f\n\t" 183 " call call_rwsem_wake\n" 184 "1:\n" 185 "# ending __up_read\n" 186 : "+m" (sem->count), "=d" (tmp) 187 : "a" (sem), "1" (tmp) 188 : "memory", "cc"); 189} 190 191/* 192 * unlock after writing 193 */ 194static inline void __up_write(struct rw_semaphore *sem) 195{ 196 __asm__ __volatile__( 197 "# beginning __up_write\n\t" 198 " movl %2,%%edx\n\t" 199LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ 200 " jz 1f\n" 201 " call call_rwsem_wake\n" 202 "1:\n\t" 203 "# ending __up_write\n" 204 : "+m" (sem->count) 205 : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) 206 : "memory", "cc", "edx"); 207} 208 209/* 210 * downgrade write lock to read lock 211 */ 212static inline void __downgrade_write(struct rw_semaphore *sem) 213{ 214 __asm__ __volatile__( 215 "# beginning __downgrade_write\n\t" 216LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ 217 " jns 1f\n\t" 218 " call call_rwsem_downgrade_wake\n" 219 "1:\n\t" 220 "# ending __downgrade_write\n" 221 : "+m" (sem->count) 222 : "a" (sem), "i" (-RWSEM_WAITING_BIAS) 223 : "memory", "cc"); 224} 225 226/* 227 * implement atomic add functionality 228 */ 229static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) 230{ 231 __asm__ __volatile__( 232LOCK_PREFIX "addl %1,%0" 233 : "+m" (sem->count) 234 : "ir" (delta)); 235} 236 237/* 238 * implement exchange and add functionality 239 */ 240static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) 241{ 242 int tmp = delta; 243 244 __asm__ __volatile__( 245LOCK_PREFIX "xadd %0,%1" 246 : "+r" (tmp), "+m" (sem->count) 247 : : "memory"); 248 249 return tmp+delta; 250} 251 252static inline int rwsem_is_locked(struct rw_semaphore *sem) 253{ 254 return (sem->count != 0); 255} 256 257#endif /* __KERNEL__ */ 258#endif /* _I386_RWSEM_H */ 259