/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/sys/sx.h 169780 2007-05-19 21:26:05Z jhb $
 */

#ifndef	_SYS_SX_H_
#define	_SYS_SX_H_

#include <sys/_lock.h>
#include <sys/_sx.h>

#ifdef	_KERNEL
#include <machine/atomic.h>
#endif

/*
 * In general, the sx locks and rwlocks use very similar algorithms.
 * The main difference in the implementations is how threads are
 * blocked when a lock is unavailable.  For this, sx locks use sleep
 * queues which do not support priority propagation, and rwlocks use
 * turnstiles which do.
 *
 * The sx_lock field consists of several fields.  The low bit
 * indicates if the lock is locked with a shared or exclusive lock.  A
 * value of 0 indicates an exclusive lock, and a value of 1 indicates
 * a shared lock.  Bit 1 is a boolean indicating if there are any
 * threads waiting for a shared lock.  Bit 2 is a boolean indicating
 * if there are any threads waiting for an exclusive lock.  Bit 3 is a
 * boolean indicating if an exclusive lock is recursively held.  The
 * rest of the variable's definition is dependent on the value of the
 * first bit.  For an exclusive lock, it is a pointer to the thread
 * holding the lock, similar to the mtx_lock field of mutexes.  For
 * shared locks, it is a count of read locks that are held.
 *
 * When the lock is not locked by any thread, it is encoded as a
 * shared lock with zero waiters.
 *
 * A note about memory barriers.  Exclusive locks need to use the same
 * memory barriers as mutexes: _acq when acquiring an exclusive lock
 * and _rel when releasing an exclusive lock.  On the other side,
 * shared lock needs to use an _acq barrier when acquiring the lock
 * but, since they don't update any locked data, no memory barrier is
 * needed when releasing a shared lock.
70 */ 71 72#define SX_LOCK_SHARED 0x01 73#define SX_LOCK_SHARED_WAITERS 0x02 74#define SX_LOCK_EXCLUSIVE_WAITERS 0x04 75#define SX_LOCK_RECURSED 0x08 76#define SX_LOCK_FLAGMASK \ 77 (SX_LOCK_SHARED | SX_LOCK_SHARED_WAITERS | \ 78 SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_RECURSED) 79 80#define SX_OWNER(x) ((x) & ~SX_LOCK_FLAGMASK) 81#define SX_SHARERS_SHIFT 4 82#define SX_SHARERS(x) (SX_OWNER(x) >> SX_SHARERS_SHIFT) 83#define SX_SHARERS_LOCK(x) \ 84 ((x) << SX_SHARERS_SHIFT | SX_LOCK_SHARED) 85#define SX_ONE_SHARER (1 << SX_SHARERS_SHIFT) 86 87#define SX_LOCK_UNLOCKED SX_SHARERS_LOCK(0) 88#define SX_LOCK_DESTROYED \ 89 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS) 90 91#ifdef _KERNEL 92 93/* 94 * Full lock operations that are suitable to be inlined in non-debug kernels. 95 * If the lock can't be acquired or released trivially then the work is 96 * deferred to 'tougher' functions. 97 */ 98 99/* Acquire an exclusive lock. */ 100#define __sx_xlock(sx, tid, file, line) do { \ 101 uintptr_t _tid = (uintptr_t)(tid); \ 102 \ 103 if (!atomic_cmpset_acq_ptr(&(sx)->sx_lock, SX_LOCK_UNLOCKED, \ 104 _tid)) { \ 105 _sx_xlock_hard((sx), _tid, (file), (line)); \ 106 } else \ 107 lock_profile_obtain_lock_success(&(sx)->lock_object, 0, \ 108 0, (file), (line)); \ 109} while (0) 110 111/* Release an exclusive lock. */ 112#define __sx_xunlock(sx, tid, file, line) do { \ 113 uintptr_t _tid = (uintptr_t)(tid); \ 114 \ 115 if (!atomic_cmpset_rel_ptr(&(sx)->sx_lock, _tid, \ 116 SX_LOCK_UNLOCKED)) \ 117 _sx_xunlock_hard((sx), _tid, (file), (line)); \ 118} while (0) 119 120/* Acquire a shared lock. 
*/ 121#define __sx_slock(sx, file, line) do { \ 122 uintptr_t x = (sx)->sx_lock; \ 123 \ 124 if (!(x & SX_LOCK_SHARED) || \ 125 !atomic_cmpset_acq_ptr(&(sx)->sx_lock, x, \ 126 x + SX_ONE_SHARER)) { \ 127 _sx_slock_hard((sx), (file), (line)); \ 128 } else \ 129 lock_profile_obtain_lock_success(&(sx)->lock_object, 0, \ 130 0, (file), (line)); \ 131} while (0) 132 133/* 134 * Release a shared lock. We can just drop a single shared lock so 135 * long as we aren't trying to drop the last shared lock when other 136 * threads are waiting for an exclusive lock. This takes advantage of 137 * the fact that an unlocked lock is encoded as a shared lock with a 138 * count of 0. 139 */ 140#define __sx_sunlock(sx, file, line) do { \ 141 uintptr_t x = (sx)->sx_lock; \ 142 \ 143 if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) || \ 144 !atomic_cmpset_ptr(&(sx)->sx_lock, x, x - SX_ONE_SHARER)) \ 145 _sx_sunlock_hard((sx), (file), (line)); \ 146} while (0) 147 148/* 149 * Function prototipes. Routines that start with an underscore are not part 150 * of the public interface and are wrappered with a macro. 
151 */ 152void sx_sysinit(void *arg); 153#define sx_init(sx, desc) sx_init_flags((sx), (desc), 0) 154void sx_init_flags(struct sx *sx, const char *description, int opts); 155void sx_destroy(struct sx *sx); 156void _sx_slock(struct sx *sx, const char *file, int line); 157void _sx_xlock(struct sx *sx, const char *file, int line); 158int _sx_try_slock(struct sx *sx, const char *file, int line); 159int _sx_try_xlock(struct sx *sx, const char *file, int line); 160void _sx_sunlock(struct sx *sx, const char *file, int line); 161void _sx_xunlock(struct sx *sx, const char *file, int line); 162int _sx_try_upgrade(struct sx *sx, const char *file, int line); 163void _sx_downgrade(struct sx *sx, const char *file, int line); 164void _sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int 165 line); 166void _sx_slock_hard(struct sx *sx, const char *file, int line); 167void _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int 168 line); 169void _sx_sunlock_hard(struct sx *sx, const char *file, int line); 170#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 171void _sx_assert(struct sx *sx, int what, const char *file, int line); 172#endif 173#ifdef DDB 174int sx_chain(struct thread *td, struct thread **ownerp); 175#endif 176 177struct sx_args { 178 struct sx *sa_sx; 179 const char *sa_desc; 180}; 181 182#define SX_SYSINIT(name, sxa, desc) \ 183 static struct sx_args name##_args = { \ 184 (sxa), \ 185 (desc) \ 186 }; \ 187 SYSINIT(name##_sx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ 188 sx_sysinit, &name##_args); \ 189 SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ 190 sx_destroy, (sxa)) 191 192/* 193 * Public interface for lock operations. 
194 */ 195#ifndef LOCK_DEBUG 196#error "LOCK_DEBUG not defined, include <sys/lock.h> before <sys/sx.h>" 197#endif 198#if (LOCK_DEBUG > 0) || defined(SX_NOINLINE) 199#define sx_xlock(sx) _sx_xlock((sx), LOCK_FILE, LOCK_LINE) 200#define sx_xunlock(sx) _sx_xunlock((sx), LOCK_FILE, LOCK_LINE) 201#define sx_slock(sx) _sx_slock((sx), LOCK_FILE, LOCK_LINE) 202#define sx_sunlock(sx) _sx_sunlock((sx), LOCK_FILE, LOCK_LINE) 203#else 204#define sx_xlock(sx) \ 205 __sx_xlock((sx), curthread, LOCK_FILE, LOCK_LINE) 206#define sx_xunlock(sx) \ 207 __sx_xunlock((sx), curthread, LOCK_FILE, LOCK_LINE) 208#define sx_slock(sx) __sx_slock((sx), LOCK_FILE, LOCK_LINE) 209#define sx_sunlock(sx) __sx_sunlock((sx), LOCK_FILE, LOCK_LINE) 210#endif /* LOCK_DEBUG > 0 || SX_NOINLINE */ 211#define sx_try_slock(sx) _sx_try_slock((sx), LOCK_FILE, LOCK_LINE) 212#define sx_try_xlock(sx) _sx_try_xlock((sx), LOCK_FILE, LOCK_LINE) 213#define sx_try_upgrade(sx) _sx_try_upgrade((sx), LOCK_FILE, LOCK_LINE) 214#define sx_downgrade(sx) _sx_downgrade((sx), LOCK_FILE, LOCK_LINE) 215 216/* 217 * Return a pointer to the owning thread if the lock is exclusively 218 * locked. 219 */ 220#define sx_xholder(sx) \ 221 ((sx)->sx_lock & SX_LOCK_SHARED ? NULL : \ 222 (struct thread *)SX_OWNER((sx)->sx_lock)) 223 224#define sx_xlocked(sx) \ 225 (((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) == \ 226 (uintptr_t)curthread) 227 228#define sx_unlock(sx) do { \ 229 if (sx_xlocked(sx)) \ 230 sx_xunlock(sx); \ 231 else \ 232 sx_sunlock(sx); \ 233} while (0) 234 235#define sx_sleep(chan, sx, pri, wmesg, timo) \ 236 _sleep((chan), &(sx)->lock_object, (pri), (wmesg), (timo)) 237 238/* 239 * Options passed to sx_init_flags(). 
240 */ 241#define SX_DUPOK 0x01 242#define SX_NOPROFILE 0x02 243#define SX_NOWITNESS 0x04 244#define SX_QUIET 0x08 245#define SX_ADAPTIVESPIN 0x10 246#define SX_RECURSE 0x20 247 248#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) 249#define SA_LOCKED LA_LOCKED 250#define SA_SLOCKED LA_SLOCKED 251#define SA_XLOCKED LA_XLOCKED 252#define SA_UNLOCKED LA_UNLOCKED 253#define SA_RECURSED LA_RECURSED 254#define SA_NOTRECURSED LA_NOTRECURSED 255 256/* Backwards compatability. */ 257#define SX_LOCKED LA_LOCKED 258#define SX_SLOCKED LA_SLOCKED 259#define SX_XLOCKED LA_XLOCKED 260#define SX_UNLOCKED LA_UNLOCKED 261#define SX_RECURSED LA_RECURSED 262#define SX_NOTRECURSED LA_NOTRECURSED 263#endif 264 265#ifdef INVARIANTS 266#define sx_assert(sx, what) _sx_assert((sx), (what), LOCK_FILE, LOCK_LINE) 267#else 268#define sx_assert(sx, what) 269#endif 270 271#endif /* _KERNEL */ 272 273#endif /* !_SYS_SX_H_ */ 274