/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
 *
 * Written by David Howells (dhowells@redhat.com).
 *
 * Derived from asm-x86/semaphore.h
 *
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, the count becomes 0xffff0001 for the
 * case of an uncontended lock. Because XADD returns the old value, the
 * writer can tell whether the lock was previously uncontended. Readers
 * increment the count by 1 and see a positive value when uncontended, or a
 * negative value if there are writers (and possibly readers) waiting, in
 * which case the reader goes to sleep.
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants
 * a lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */

#ifndef _ASM_X86_RWSEM_H
#define _ASM_X86_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <asm/asm.h>

struct rwsem_waiter;

extern asmregparm struct rw_semaphore *
        rwsem_down_read_failed(struct rw_semaphore *sem);
extern asmregparm struct rw_semaphore *
        rwsem_down_write_failed(struct rw_semaphore *sem);
extern asmregparm struct rw_semaphore *
        rwsem_wake(struct rw_semaphore *sem);
extern asmregparm struct rw_semaphore *
        rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
 * the semaphore definition
 *
 * The bias values and the counter type limit the number of
 * potential readers/writers to 32767 for 32 bits and 2147483647
 * for 64 bits.
 */

#ifdef CONFIG_X86_64
# define RWSEM_ACTIVE_MASK              0xffffffffL
#else
# define RWSEM_ACTIVE_MASK              0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE            0x00000000L
#define RWSEM_ACTIVE_BIAS               0x00000001L
#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
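/*
 * Worked example of the count arithmetic on a 32-bit kernel (illustrative
 * only; the values follow directly from the definitions above, where
 * RWSEM_ACTIVE_WRITE_BIAS == 0xffff0001 and RWSEM_ACTIVE_READ_BIAS == 1):
 *
 *      unlocked                                 0x00000000
 *      one active reader                        0x00000001
 *      two active readers                       0x00000002
 *      one active writer                        0xffff0001  (XADD returned 0,
 *                                                            i.e. was unlocked)
 *      reader increments a held write lock      0xffff0002  (negative => the
 *                                                            reader takes the
 *                                                            slow path)
 *
 * The MSW holds the negated writer/waiter count and the LSW the active lock
 * count, which is why a simple sign test suffices on the fast paths below.
 */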
typedef signed long rwsem_count_t;

struct rw_semaphore {
        rwsem_count_t           count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif


#define __RWSEM_INITIALIZER(name)                                       \
{                                                                       \
        RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock),   \
        LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name)     \
}

#define DECLARE_RWSEM(name)                                     \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                         \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __init_rwsem((sem), #sem, &__key);                      \
} while (0)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        asm volatile("# beginning down_read\n\t"
                     LOCK_PREFIX _ASM_INC "(%1)\n\t"
                     /* adds 0x00000001 */
                     " jns 1f\n"
                     " call call_rwsem_down_read_failed\n"
                     "1:\n\t"
                     "# ending down_read\n\t"
                     : "+m" (sem->count)
                     : "a" (sem)
                     : "memory", "cc");
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        rwsem_count_t result, tmp;
        asm volatile("# beginning __down_read_trylock\n\t"
                     " mov %0,%1\n\t"
                     "1:\n\t"
                     " mov %1,%2\n\t"
                     " add %3,%2\n\t"
                     " jle 2f\n\t"
                     LOCK_PREFIX " cmpxchg %2,%0\n\t"
                     " jnz 1b\n\t"
                     "2:\n\t"
                     "# ending __down_read_trylock\n\t"
                     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
                     : "i" (RWSEM_ACTIVE_READ_BIAS)
                     : "memory", "cc");
        return result >= 0 ? 1 : 0;
}
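/*
 * What the trylock loop above does, written out as equivalent C (a sketch
 * for illustration only; the real fast path stays in asm):
 *
 *      rwsem_count_t old = sem->count, new, seen;
 *      for (;;) {
 *              new = old + RWSEM_ACTIVE_READ_BIAS;
 *              if (new <= 0)
 *                      return 0;       // writer active or waiting
 *              seen = cmpxchg(&sem->count, old, new);
 *              if (seen == old)
 *                      return 1;       // read lock acquired
 *              old = seen;             // raced with another CPU, retry
 *      }
 */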
/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        rwsem_count_t tmp;
        asm volatile("# beginning down_write\n\t"
                     LOCK_PREFIX " xadd %1,(%2)\n\t"
                     /* adds 0xffff0001, returns the old value */
                     " test %1,%1\n\t"
                     /* was the count 0 before? */
                     " jz 1f\n"
                     " call call_rwsem_down_write_failed\n"
                     "1:\n"
                     "# ending down_write"
                     : "+m" (sem->count), "=d" (tmp)
                     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)
                     : "memory", "cc");
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        rwsem_count_t ret = cmpxchg(&sem->count,
                                    RWSEM_UNLOCKED_VALUE,
                                    RWSEM_ACTIVE_WRITE_BIAS);
        if (ret == RWSEM_UNLOCKED_VALUE)
                return 1;
        return 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        rwsem_count_t tmp;
        asm volatile("# beginning __up_read\n\t"
                     LOCK_PREFIX " xadd %1,(%2)\n\t"
                     /* subtracts 1, returns the old value */
                     " jns 1f\n\t"
                     " call call_rwsem_wake\n" /* expects old value in %edx */
                     "1:\n"
                     "# ending __up_read\n"
                     : "+m" (sem->count), "=d" (tmp)
                     : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
                     : "memory", "cc");
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        rwsem_count_t tmp;
        asm volatile("# beginning __up_write\n\t"
                     LOCK_PREFIX " xadd %1,(%2)\n\t"
                     /* subtracts 0xffff0001, returns the old value */
                     " jns 1f\n\t"
                     " call call_rwsem_wake\n" /* expects old value in %edx */
                     "1:\n\t"
                     "# ending __up_write\n"
                     : "+m" (sem->count), "=d" (tmp)
                     : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
                     : "memory", "cc");
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        asm volatile("# beginning __downgrade_write\n\t"
                     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
                     /*
                      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
                      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
                      */
                     " jns 1f\n\t"
                     " call call_rwsem_downgrade_wake\n"
                     "1:\n\t"
                     "# ending __downgrade_write\n"
                     : "+m" (sem->count)
                     : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
                     : "memory", "cc");
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(rwsem_count_t delta,
                                    struct rw_semaphore *sem)
{
        asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
                     : "+m" (sem->count)
                     : "er" (delta));
}

/*
 * implement exchange and add functionality
 */
static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
                                                struct rw_semaphore *sem)
{
        rwsem_count_t tmp = delta;

        asm volatile(LOCK_PREFIX "xadd %0,%1"
                     : "+r" (tmp), "+m" (sem->count)
                     : : "memory");

        return tmp + delta;
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */