/* sx.h, revision 194578 */
1169689Skan/*- 2169689Skan * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org> 3169689Skan * Copyright (c) 2001 Jason Evans <jasone@freebsd.org> 4169689Skan * All rights reserved. 5169689Skan * 6169689Skan * Redistribution and use in source and binary forms, with or without 7169689Skan * modification, are permitted provided that the following conditions 8169689Skan * are met: 9169689Skan * 1. Redistributions of source code must retain the above copyright 10169689Skan * notice(s), this list of conditions and the following disclaimer as 11169689Skan * the first lines of this file unmodified other than the possible 12169689Skan * addition of one or more copyright notices. 13169689Skan * 2. Redistributions in binary form must reproduce the above copyright 14169689Skan * notice(s), this list of conditions and the following disclaimer in the 15169689Skan * documentation and/or other materials provided with the distribution. 16169689Skan * 17169689Skan * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 18169689Skan * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19169689Skan * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20169689Skan * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 21169689Skan * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22169689Skan * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23169689Skan * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 24169689Skan * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25169689Skan * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26169689Skan * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 27169689Skan * DAMAGE. 
28169689Skan * 29169689Skan * $FreeBSD: head/sys/sys/sx.h 194578 2009-06-21 09:01:12Z rdivacky $ 30169689Skan */ 31169689Skan 32169689Skan#ifndef _SYS_SX_H_ 33169689Skan#define _SYS_SX_H_ 34169689Skan 35169689Skan#include <sys/_lock.h> 36169689Skan#include <sys/_sx.h> 37169689Skan 38169689Skan#ifdef _KERNEL 39169689Skan#include <sys/pcpu.h> 40169689Skan#include <sys/lock_profile.h> 41169689Skan#include <sys/lockstat.h> 42169689Skan#include <machine/atomic.h> 43169689Skan#endif 44169689Skan 45169689Skan/* 46169689Skan * In general, the sx locks and rwlocks use very similar algorithms. 47169689Skan * The main difference in the implementations is how threads are 48169689Skan * blocked when a lock is unavailable. For this, sx locks use sleep 49169689Skan * queues which do not support priority propagation, and rwlocks use 50169689Skan * turnstiles which do. 51169689Skan * 52169689Skan * The sx_lock field consists of several fields. The low bit 53169689Skan * indicates if the lock is locked with a shared or exclusive lock. A 54169689Skan * value of 0 indicates an exclusive lock, and a value of 1 indicates 55169689Skan * a shared lock. Bit 1 is a boolean indicating if there are any 56169689Skan * threads waiting for a shared lock. Bit 2 is a boolean indicating 57169689Skan * if there are any threads waiting for an exclusive lock. Bit 3 is a 58169689Skan * boolean indicating if an exclusive lock is recursively held. The 59169689Skan * rest of the variable's definition is dependent on the value of the 60169689Skan * first bit. For an exclusive lock, it is a pointer to the thread 61169689Skan * holding the lock, similar to the mtx_lock field of mutexes. For 62169689Skan * shared locks, it is a count of read locks that are held. 63169689Skan * 64169689Skan * When the lock is not locked by any thread, it is encoded as a 65169689Skan * shared lock with zero waiters. 66169689Skan * 67169689Skan * A note about memory barriers. 
Exclusive locks need to use the same 68169689Skan * memory barriers as mutexes: _acq when acquiring an exclusive lock 69169689Skan * and _rel when releasing an exclusive lock. On the other side, 70169689Skan * shared lock needs to use an _acq barrier when acquiring the lock 71169689Skan * but, since they don't update any locked data, no memory barrier is 72169689Skan * needed when releasing a shared lock. 73169689Skan */ 74169689Skan 75169689Skan#define SX_LOCK_SHARED 0x01 76169689Skan#define SX_LOCK_SHARED_WAITERS 0x02 77169710Skan#define SX_LOCK_EXCLUSIVE_WAITERS 0x04 78169689Skan#define SX_LOCK_RECURSED 0x08 79169689Skan#define SX_LOCK_FLAGMASK \ 80169689Skan (SX_LOCK_SHARED | SX_LOCK_SHARED_WAITERS | \ 81169689Skan SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_RECURSED) 82169710Skan 83169689Skan#define SX_OWNER(x) ((x) & ~SX_LOCK_FLAGMASK) 84169689Skan#define SX_SHARERS_SHIFT 4 85169689Skan#define SX_SHARERS(x) (SX_OWNER(x) >> SX_SHARERS_SHIFT) 86169689Skan#define SX_SHARERS_LOCK(x) \ 87169689Skan ((x) << SX_SHARERS_SHIFT | SX_LOCK_SHARED) 88169689Skan#define SX_ONE_SHARER (1 << SX_SHARERS_SHIFT) 89169689Skan 90169689Skan#define SX_LOCK_UNLOCKED SX_SHARERS_LOCK(0) 91169689Skan#define SX_LOCK_DESTROYED \ 92169689Skan (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS) 93169689Skan 94169689Skan#ifdef _KERNEL 95169689Skan 96169689Skan/* 97169689Skan * Function prototipes. Routines that start with an underscore are not part 98169689Skan * of the public interface and are wrappered with a macro. 
99169689Skan */ 100169689Skanvoid sx_sysinit(void *arg); 101169689Skan#define sx_init(sx, desc) sx_init_flags((sx), (desc), 0) 102169689Skanvoid sx_init_flags(struct sx *sx, const char *description, int opts); 103169689Skanvoid sx_destroy(struct sx *sx); 104169689Skanint _sx_slock(struct sx *sx, int opts, const char *file, int line); 105169689Skanint _sx_xlock(struct sx *sx, int opts, const char *file, int line); 106169689Skanint _sx_try_slock(struct sx *sx, const char *file, int line); 107169689Skanint _sx_try_xlock(struct sx *sx, const char *file, int line); 108169689Skanvoid _sx_sunlock(struct sx *sx, const char *file, int line); 109169689Skanvoid _sx_xunlock(struct sx *sx, const char *file, int line); 110169689Skanint _sx_try_upgrade(struct sx *sx, const char *file, int line); 111169689Skanvoid _sx_downgrade(struct sx *sx, const char *file, int line); 112169689Skanint _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, 113169689Skan const char *file, int line); 114169689Skanint _sx_slock_hard(struct sx *sx, int opts, const char *file, int line); 115169689Skanvoid _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int 116169689Skan line); 117169689Skanvoid _sx_sunlock_hard(struct sx *sx, const char *file, int line); 118169689Skan#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 119169689Skanvoid _sx_assert(struct sx *sx, int what, const char *file, int line); 120169689Skan#endif 121169689Skan#ifdef DDB 122169689Skanint sx_chain(struct thread *td, struct thread **ownerp); 123169689Skan#endif 124169689Skan 125169689Skanstruct sx_args { 126169689Skan struct sx *sa_sx; 127169689Skan const char *sa_desc; 128169689Skan}; 129169689Skan 130169689Skan#define SX_SYSINIT(name, sxa, desc) \ 131169689Skan static struct sx_args name##_args = { \ 132169689Skan (sxa), \ 133169689Skan (desc) \ 134169689Skan }; \ 135169689Skan SYSINIT(name##_sx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ 136169689Skan sx_sysinit, &name##_args); \ 137169689Skan 
SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ 138169689Skan sx_destroy, (sxa)) 139169689Skan 140169689Skan/* 141169689Skan * Full lock operations that are suitable to be inlined in non-debug kernels. 142169689Skan * If the lock can't be acquired or released trivially then the work is 143169689Skan * deferred to 'tougher' functions. 144169689Skan */ 145169689Skan 146169689Skan/* Acquire an exclusive lock. */ 147169689Skanstatic __inline int 148169689Skan__sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file, 149169689Skan int line) 150169689Skan{ 151169689Skan uintptr_t tid = (uintptr_t)td; 152169689Skan int error = 0; 153169689Skan 154169689Skan if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) 155169689Skan error = _sx_xlock_hard(sx, tid, opts, file, line); 156169689Skan else 157169689Skan LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, 158169689Skan sx, 0, 0, file, line); 159169689Skan 160169689Skan return (error); 161169689Skan} 162169689Skan 163169689Skan/* Release an exclusive lock. */ 164169689Skanstatic __inline void 165169689Skan__sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line) 166169689Skan{ 167169689Skan uintptr_t tid = (uintptr_t)td; 168169689Skan 169169689Skan if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)) 170169689Skan _sx_xunlock_hard(sx, tid, file, line); 171169689Skan} 172169689Skan 173169689Skan/* Acquire a shared lock. 
*/ 174169689Skanstatic __inline int 175169689Skan__sx_slock(struct sx *sx, int opts, const char *file, int line) 176169689Skan{ 177169689Skan uintptr_t x = sx->sx_lock; 178169689Skan int error = 0; 179169689Skan 180169689Skan if (!(x & SX_LOCK_SHARED) || 181169689Skan !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) 182169689Skan error = _sx_slock_hard(sx, opts, file, line); 183169689Skan else 184169689Skan LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx, 0, 185169689Skan 0, file, line); 186169689Skan 187169689Skan return (error); 188169689Skan} 189169689Skan 190169689Skan/* 191169689Skan * Release a shared lock. We can just drop a single shared lock so 192169689Skan * long as we aren't trying to drop the last shared lock when other 193169689Skan * threads are waiting for an exclusive lock. This takes advantage of 194169689Skan * the fact that an unlocked lock is encoded as a shared lock with a 195169689Skan * count of 0. 196169689Skan */ 197169689Skanstatic __inline void 198169689Skan__sx_sunlock(struct sx *sx, const char *file, int line) 199169689Skan{ 200169689Skan uintptr_t x = sx->sx_lock; 201169689Skan 202169689Skan if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) || 203169689Skan !atomic_cmpset_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER)) 204169689Skan _sx_sunlock_hard(sx, file, line); 205169689Skan} 206169689Skan 207169689Skan/* 208169689Skan * Public interface for lock operations. 
209169689Skan */ 210169689Skan#ifndef LOCK_DEBUG 211169689Skan#error "LOCK_DEBUG not defined, include <sys/lock.h> before <sys/sx.h>" 212169689Skan#endif 213169689Skan#if (LOCK_DEBUG > 0) || defined(SX_NOINLINE) 214169689Skan#define sx_xlock(sx) (void)_sx_xlock((sx), 0, LOCK_FILE, LOCK_LINE) 215169689Skan#define sx_xlock_sig(sx) \ 216169689Skan _sx_xlock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE) 217169689Skan#define sx_xunlock(sx) _sx_xunlock((sx), LOCK_FILE, LOCK_LINE) 218169689Skan#define sx_slock(sx) (void)_sx_slock((sx), 0, LOCK_FILE, LOCK_LINE) 219169689Skan#define sx_slock_sig(sx) \ 220169689Skan _sx_slock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE) 221169689Skan#define sx_sunlock(sx) _sx_sunlock((sx), LOCK_FILE, LOCK_LINE) 222169689Skan#else 223169689Skan#define sx_xlock(sx) \ 224169689Skan (void)__sx_xlock((sx), curthread, 0, LOCK_FILE, LOCK_LINE) 225169689Skan#define sx_xlock_sig(sx) \ 226169689Skan __sx_xlock((sx), curthread, SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE) 227169689Skan#define sx_xunlock(sx) \ 228169689Skan __sx_xunlock((sx), curthread, LOCK_FILE, LOCK_LINE) 229169689Skan#define sx_slock(sx) (void)__sx_slock((sx), 0, LOCK_FILE, LOCK_LINE) 230169689Skan#define sx_slock_sig(sx) \ 231169689Skan __sx_slock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE) 232169689Skan#define sx_sunlock(sx) __sx_sunlock((sx), LOCK_FILE, LOCK_LINE) 233169689Skan#endif /* LOCK_DEBUG > 0 || SX_NOINLINE */ 234169689Skan#define sx_try_slock(sx) _sx_try_slock((sx), LOCK_FILE, LOCK_LINE) 235169689Skan#define sx_try_xlock(sx) _sx_try_xlock((sx), LOCK_FILE, LOCK_LINE) 236169689Skan#define sx_try_upgrade(sx) _sx_try_upgrade((sx), LOCK_FILE, LOCK_LINE) 237169689Skan#define sx_downgrade(sx) _sx_downgrade((sx), LOCK_FILE, LOCK_LINE) 238169689Skan 239169689Skan/* 240169689Skan * Return a pointer to the owning thread if the lock is exclusively 241169689Skan * locked. 242169689Skan */ 243169689Skan#define sx_xholder(sx) \ 244169689Skan ((sx)->sx_lock & SX_LOCK_SHARED ? 
NULL : \ 245169689Skan (struct thread *)SX_OWNER((sx)->sx_lock)) 246169689Skan 247169689Skan#define sx_xlocked(sx) \ 248169689Skan (((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) == \ 249169689Skan (uintptr_t)curthread) 250169689Skan 251169689Skan#define sx_unlock(sx) do { \ 252169689Skan if (sx_xlocked(sx)) \ 253169689Skan sx_xunlock(sx); \ 254169689Skan else \ 255169689Skan sx_sunlock(sx); \ 256169689Skan} while (0) 257169689Skan 258169689Skan#define sx_sleep(chan, sx, pri, wmesg, timo) \ 259169689Skan _sleep((chan), &(sx)->lock_object, (pri), (wmesg), (timo)) 260169689Skan 261169689Skan/* 262169689Skan * Options passed to sx_init_flags(). 263169689Skan */ 264169689Skan#define SX_DUPOK 0x01 265169689Skan#define SX_NOPROFILE 0x02 266169689Skan#define SX_NOWITNESS 0x04 267169689Skan#define SX_QUIET 0x08 268169689Skan#define SX_NOADAPTIVE 0x10 269169689Skan#define SX_RECURSE 0x20 270169689Skan 271169689Skan/* 272169689Skan * Options passed to sx_*lock_hard(). 273169689Skan */ 274169689Skan#define SX_INTERRUPTIBLE 0x40 275169689Skan 276169689Skan#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 277169689Skan#define SA_LOCKED LA_LOCKED 278169689Skan#define SA_SLOCKED LA_SLOCKED 279169689Skan#define SA_XLOCKED LA_XLOCKED 280169689Skan#define SA_UNLOCKED LA_UNLOCKED 281169689Skan#define SA_RECURSED LA_RECURSED 282169689Skan#define SA_NOTRECURSED LA_NOTRECURSED 283169689Skan 284169689Skan/* Backwards compatability. 
*/ 285169689Skan#define SX_LOCKED LA_LOCKED 286169689Skan#define SX_SLOCKED LA_SLOCKED 287169689Skan#define SX_XLOCKED LA_XLOCKED 288169689Skan#define SX_UNLOCKED LA_UNLOCKED 289169689Skan#define SX_RECURSED LA_RECURSED 290169689Skan#define SX_NOTRECURSED LA_NOTRECURSED 291169689Skan#endif 292169689Skan 293169689Skan#ifdef INVARIANTS 294169689Skan#define sx_assert(sx, what) _sx_assert((sx), (what), LOCK_FILE, LOCK_LINE) 295169689Skan#else 296169689Skan#define sx_assert(sx, what) (void)0 297169689Skan#endif 298169689Skan 299169689Skan#endif /* _KERNEL */ 300169689Skan 301169689Skan#endif /* !_SYS_SX_H_ */ 302169689Skan