#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not
 * necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static __inline__ int __raw_spin_trylock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __spin_trylock(lock) == 0;
}
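/*
 * Illustrative sketch (not part of the original source): the lwarx/stwcx.
 * pair above is a load-reserve/store-conditional retry loop.  In rough C
 * pseudo-code, __spin_trylock() performs the following atomically:
 *
 *	old = lock->slock;			// lwarx: load and reserve
 *	if (old == 0)
 *		lock->slock = LOCK_TOKEN;	// stwcx.: store iff reservation held
 *	return old;				// 0 => we got the lock
 *
 * The isync on the success path keeps the critical section from being
 * executed speculatively before the lock is actually held.
 */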
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR	(get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif

static __inline__ void __raw_spin_lock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static __inline__ void __raw_spin_lock_flags(raw_spinlock_t *lock,
					     unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static __inline__ long __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
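/*
 * Illustrative sketch (not part of the original source): the rwlock word
 * encodes its state in the sign of the value:
 *
 *	rw->lock == 0	unlocked
 *	rw->lock >  0	read-held; the value is the number of readers
 *	rw->lock <  0	write-held (WRLOCK_TOKEN is negative)
 *
 * so __read_trylock() above is, atomically:
 *
 *	tmp = rw->lock + 1;	// via lwarx/addic./stwcx.
 *	if (tmp > 0)
 *		rw->lock = tmp;	// one more reader
 *	return tmp;		// > 0 => read lock acquired
 */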
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}

#define _raw_spin_relax(lock)	__spin_yield(lock)
#define _raw_read_relax(lock)	__rw_yield(lock)
#define _raw_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
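/*
 * Usage sketch (illustrative only; my_lock is a hypothetical example):
 * the __raw_* primitives above are not called directly.  The generic
 * spinlock layer in linux/spinlock.h wraps them, roughly:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);	// -> __raw_spin_lock() on SMP
 *	...critical section...
 *	spin_unlock(&my_lock);	// -> __raw_spin_unlock(): lwsync, then store 0
 */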