1/*- 2 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org> 3 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice(s), this list of conditions and the following disclaimer as 11 * the first lines of this file unmodified other than the possible 12 * addition of one or more copyright notices. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice(s), this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 24 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 27 * DAMAGE. 28 */ 29 30/* 31 * Shared/exclusive locks. This implementation attempts to ensure 32 * deterministic lock granting behavior, so that slocks and xlocks are 33 * interleaved. 34 * 35 * Priority propagation will not generally raise the priority of lock holders, 36 * so should not be relied upon in combination with sx locks. 37 */ 38 39#include "opt_adaptive_sx.h" 40#include "opt_ddb.h" 41 42#include <sys/cdefs.h>
43__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 169394 2007-05-08 21:51:37Z jhb $");
44 45#include <sys/param.h> 46#include <sys/ktr.h> 47#include <sys/lock.h> 48#include <sys/lock_profile.h> 49#include <sys/mutex.h> 50#include <sys/proc.h> 51#include <sys/sleepqueue.h> 52#include <sys/sx.h> 53#include <sys/systm.h> 54 55#ifdef ADAPTIVE_SX 56#include <machine/cpu.h> 57#endif 58 59#ifdef DDB 60#include <ddb/ddb.h> 61#endif 62 63#if !defined(SMP) && defined(ADAPTIVE_SX) 64#error "You must have SMP to enable the ADAPTIVE_SX option" 65#endif 66 67/* Handy macros for sleep queues. */ 68#define SQ_EXCLUSIVE_QUEUE 0 69#define SQ_SHARED_QUEUE 1 70 71/* 72 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We 73 * drop Giant anytime we have to sleep or if we adaptively spin. 74 */ 75#define GIANT_DECLARE \ 76 int _giantcnt = 0; \ 77 WITNESS_SAVE_DECL(Giant) \ 78 79#define GIANT_SAVE() do { \ 80 if (mtx_owned(&Giant)) { \ 81 WITNESS_SAVE(&Giant.lock_object, Giant); \ 82 while (mtx_owned(&Giant)) { \ 83 _giantcnt++; \ 84 mtx_unlock(&Giant); \ 85 } \ 86 } \ 87} while (0) 88 89#define GIANT_RESTORE() do { \ 90 if (_giantcnt > 0) { \ 91 mtx_assert(&Giant, MA_NOTOWNED); \ 92 while (_giantcnt--) \ 93 mtx_lock(&Giant); \ 94 WITNESS_RESTORE(&Giant.lock_object, Giant); \ 95 } \ 96} while (0) 97 98/* 99 * Returns true if an exclusive lock is recursed. It curthread 100 * currently has an exclusive lock. 101 */ 102#define sx_recursed(sx) ((sx)->sx_recurse != 0) 103 104/* 105 * Return a pointer to the owning thread if the lock is exclusively 106 * locked. 107 */ 108#define sx_xholder(sx) \ 109 ((sx)->sx_lock & SX_LOCK_SHARED ? NULL : \ 110 (struct thread *)SX_OWNER((sx)->sx_lock)) 111 112#ifdef DDB 113static void db_show_sx(struct lock_object *lock); 114#endif 115static void lock_sx(struct lock_object *lock, int how); 116static int unlock_sx(struct lock_object *lock); 117 118struct lock_class lock_class_sx = { 119 .lc_name = "sx", 120 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE, 121#ifdef DDB 122 .lc_ddb_show = db_show_sx, 123#endif 124 .lc_lock = lock_sx, 125 .lc_unlock = unlock_sx, 126}; 127 128#ifndef INVARIANTS 129#define _sx_assert(sx, what, file, line) 130#endif 131 132void 133lock_sx(struct lock_object *lock, int how) 134{ 135 struct sx *sx; 136 137 sx = (struct sx *)lock; 138 if (how) 139 sx_xlock(sx); 140 else 141 sx_slock(sx); 142} 143 144int 145unlock_sx(struct lock_object *lock) 146{ 147 struct sx *sx; 148 149 sx = (struct sx *)lock; 150 sx_assert(sx, SX_LOCKED | SX_NOTRECURSED); 151 if (sx_xlocked(sx)) { 152 sx_xunlock(sx); 153 return (1); 154 } else { 155 sx_sunlock(sx); 156 return (0); 157 } 158} 159 160void 161sx_sysinit(void *arg) 162{ 163 struct sx_args *sargs = arg; 164 165 sx_init(sargs->sa_sx, sargs->sa_desc); 166} 167 168void 169sx_init_flags(struct sx *sx, const char *description, int opts) 170{ 171 int flags; 172 173 flags = LO_SLEEPABLE | LO_UPGRADABLE | LO_RECURSABLE; 174 if (opts & SX_DUPOK) 175 flags |= LO_DUPOK; 176 if (opts & SX_NOPROFILE) 177 flags |= LO_NOPROFILE; 178 if (!(opts & SX_NOWITNESS)) 179 flags |= LO_WITNESS; 180 if (opts & SX_QUIET) 181 flags |= LO_QUIET; 182 183 flags |= opts & SX_ADAPTIVESPIN; 184 sx->sx_lock = SX_LOCK_UNLOCKED; 185 sx->sx_recurse = 0; 186 lock_profile_object_init(&sx->lock_object, &lock_class_sx, description); 187 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags); 188} 189 190void 191sx_destroy(struct sx *sx) 192{ 193 194 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held")); 195 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
196 lock_profile_object_destroy(&sx->lock_object); 197 lock_destroy(&sx->lock_object); 198} 199 200void 201_sx_slock(struct sx *sx, const char *file, int line) 202{ 203 204 MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
205 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line); 206 __sx_slock(sx, file, line); 207 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line); 208 WITNESS_LOCK(&sx->lock_object, 0, file, line); 209 curthread->td_locks++; 210} 211 212int 213_sx_try_slock(struct sx *sx, const char *file, int line) 214{ 215 uintptr_t x; 216 217 x = sx->sx_lock;
	KASSERT(x != SX_LOCK_DESTROYED,
	    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
218 if ((x & SX_LOCK_SHARED) && atomic_cmpset_acq_ptr(&sx->sx_lock, x, 219 x + SX_ONE_SHARER)) { 220 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line); 221 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line); 222 curthread->td_locks++; 223 return (1); 224 } 225 226 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line); 227 return (0); 228} 229 230void 231_sx_xlock(struct sx *sx, const char *file, int line) 232{ 233 234 MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
235 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, 236 line); 237 __sx_xlock(sx, curthread, file, line); 238 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line); 239 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); 240 curthread->td_locks++; 241} 242 243int 244_sx_try_xlock(struct sx *sx, const char *file, int line) 245{ 246 int rval; 247 248 MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
249 250 if (sx_xlocked(sx)) { 251 sx->sx_recurse++; 252 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 253 rval = 1; 254 } else 255 rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, 256 (uintptr_t)curthread); 257 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line); 258 if (rval) { 259 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 260 file, line); 261 curthread->td_locks++; 262 } 263 264 return (rval); 265} 266 267void 268_sx_sunlock(struct sx *sx, const char *file, int line) 269{ 270 271 MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
272 _sx_assert(sx, SX_SLOCKED, file, line); 273 curthread->td_locks--; 274 WITNESS_UNLOCK(&sx->lock_object, 0, file, line); 275 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line); 276 if (SX_SHARERS(sx->sx_lock) == 0) 277 lock_profile_release_lock(&sx->lock_object); 278 __sx_sunlock(sx, file, line); 279} 280 281void 282_sx_xunlock(struct sx *sx, const char *file, int line) 283{ 284 285 MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
286 _sx_assert(sx, SX_XLOCKED, file, line); 287 curthread->td_locks--; 288 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); 289 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file, 290 line); 291 if (!sx_recursed(sx)) 292 lock_profile_release_lock(&sx->lock_object); 293 __sx_xunlock(sx, curthread, file, line); 294} 295 296/* 297 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock. 298 * This will only succeed if this thread holds a single shared lock. 299 * Return 1 if if the upgrade succeed, 0 otherwise. 300 */ 301int 302_sx_try_upgrade(struct sx *sx, const char *file, int line) 303{ 304 uintptr_t x; 305 int success; 306
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
307 _sx_assert(sx, SX_SLOCKED, file, line); 308 309 /* 310 * Try to switch from one shared lock to an exclusive lock. We need 311 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that 312 * we will wake up the exclusive waiters when we drop the lock. 313 */ 314 x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS; 315 success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x, 316 (uintptr_t)curthread | x); 317 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line); 318 if (success) 319 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 320 file, line); 321 return (success); 322} 323 324/* 325 * Downgrade an unrecursed exclusive lock into a single shared lock. 326 */ 327void 328_sx_downgrade(struct sx *sx, const char *file, int line) 329{ 330 uintptr_t x; 331
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
332 _sx_assert(sx, SX_XLOCKED | SX_NOTRECURSED, file, line); 333#ifndef INVARIANTS 334 if (sx_recursed(sx)) 335 panic("downgrade of a recursed lock"); 336#endif 337 338 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line); 339 340 /* 341 * Try to switch from an exclusive lock with no shared waiters 342 * to one sharer with no shared waiters. If there are 343 * exclusive waiters, we don't need to lock the sleep queue so 344 * long as we preserve the flag. We do one quick try and if 345 * that fails we grab the sleepq lock to keep the flags from 346 * changing and do it the slow way. 347 * 348 * We have to lock the sleep queue if there are shared waiters 349 * so we can wake them up. 350 */ 351 x = sx->sx_lock; 352 if (!(x & SX_LOCK_SHARED_WAITERS) && 353 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) | 354 (x & SX_LOCK_EXCLUSIVE_WAITERS))) { 355 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); 356 return; 357 } 358 359 /* 360 * Lock the sleep queue so we can read the waiters bits 361 * without any races and wakeup any shared waiters. 362 */ 363 sleepq_lock(&sx->lock_object); 364 365 /* 366 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single 367 * shared lock. If there are any shared waiters, wake them up. 368 */ 369 x = sx->sx_lock; 370 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | 371 (x & SX_LOCK_EXCLUSIVE_WAITERS)); 372 if (x & SX_LOCK_SHARED_WAITERS) 373 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, 374 SQ_SHARED_QUEUE); 375 else 376 sleepq_release(&sx->lock_object); 377 378 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); 379} 380 381/* 382 * This function represents the so-called 'hard case' for sx_xlock 383 * operation. All 'easy case' failures are redirected to this. Note 384 * that ideally this would be a static function, but it needs to be 385 * accessible from at least sx.h. 386 */ 387void 388_sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line) 389{ 390 GIANT_DECLARE; 391#ifdef ADAPTIVE_SX 392 volatile struct thread *owner; 393#endif 394 uintptr_t x; 395 int contested = 0; 396 uint64_t waitstart = 0; 397 398 /* If we already hold an exclusive lock, then recurse. */ 399 if (sx_xlocked(sx)) { 400 sx->sx_recurse++; 401 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 402 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 403 CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx); 404 return; 405 } 406 lock_profile_obtain_lock_failed(&(sx)->lock_object, 407 &contested, &waitstart); 408 409 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 410 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, 411 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line); 412 413 while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) { 414#ifdef ADAPTIVE_SX 415 /* 416 * If the lock is write locked and the owner is 417 * running on another CPU, spin until the owner stops 418 * running or the state of the lock changes. 
419 */ 420 x = sx->sx_lock; 421 if (!(x & SX_LOCK_SHARED) && 422 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) { 423 x = SX_OWNER(x); 424 owner = (struct thread *)x; 425 if (TD_IS_RUNNING(owner)) { 426 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 427 CTR3(KTR_LOCK, 428 "%s: spinning on %p held by %p", 429 __func__, sx, owner); 430 GIANT_SAVE(); 431 while (SX_OWNER(sx->sx_lock) == x && 432 TD_IS_RUNNING(owner)) 433 cpu_spinwait(); 434 continue; 435 } 436 } 437#endif 438 439 sleepq_lock(&sx->lock_object); 440 x = sx->sx_lock; 441 442 /* 443 * If the lock was released while spinning on the 444 * sleep queue chain lock, try again. 445 */ 446 if (x == SX_LOCK_UNLOCKED) { 447 sleepq_release(&sx->lock_object); 448 continue; 449 } 450 451#ifdef ADAPTIVE_SX 452 /* 453 * The current lock owner might have started executing 454 * on another CPU (or the lock could have changed 455 * owners) while we were waiting on the sleep queue 456 * chain lock. If so, drop the sleep queue lock and try 457 * again. 458 */ 459 if (!(x & SX_LOCK_SHARED) && 460 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) { 461 owner = (struct thread *)SX_OWNER(x); 462 if (TD_IS_RUNNING(owner)) { 463 sleepq_release(&sx->lock_object); 464 continue; 465 } 466 } 467#endif 468 469 /* 470 * If an exclusive lock was released with both shared 471 * and exclusive waiters and a shared waiter hasn't 472 * woken up and acquired the lock yet, sx_lock will be 473 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS. 474 * If we see that value, try to acquire it once. Note 475 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS 476 * as there are other exclusive waiters still. If we 477 * fail, restart the loop. 478 */ 479 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) { 480 if (atomic_cmpset_acq_ptr(&sx->sx_lock, 481 SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS, 482 tid | SX_LOCK_EXCLUSIVE_WAITERS)) { 483 sleepq_release(&sx->lock_object); 484 CTR2(KTR_LOCK, "%s: %p claimed by new writer", 485 __func__, sx); 486 break; 487 } 488 sleepq_release(&sx->lock_object); 489 continue; 490 } 491 492 /* 493 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS. If we fail, 494 * than loop back and retry. 495 */ 496 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) { 497 if (!atomic_cmpset_ptr(&sx->sx_lock, x, 498 x | SX_LOCK_EXCLUSIVE_WAITERS)) { 499 sleepq_release(&sx->lock_object); 500 continue; 501 } 502 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 503 CTR2(KTR_LOCK, "%s: %p set excl waiters flag", 504 __func__, sx); 505 } 506 507 /* 508 * Since we have been unable to acquire the exclusive 509 * lock and the exclusive waiters flag is set, we have 510 * to sleep. 511 */ 512 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 513 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", 514 __func__, sx); 515 516 GIANT_SAVE(); 517 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, 518 SLEEPQ_SX, SQ_EXCLUSIVE_QUEUE); 519 sleepq_wait(&sx->lock_object); 520 521 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 522 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", 523 __func__, sx); 524 } 525 526 GIANT_RESTORE(); 527 lock_profile_obtain_lock_success(&(sx)->lock_object, contested, 528 waitstart, file, line); 529} 530 531/* 532 * This function represents the so-called 'hard case' for sx_xunlock 533 * operation. All 'easy case' failures are redirected to this. Note 534 * that ideally this would be a static function, but it needs to be 535 * accessible from at least sx.h. 
536 */ 537void 538_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line) 539{ 540 uintptr_t x; 541 int queue; 542 543 MPASS(!(sx->sx_lock & SX_LOCK_SHARED)); 544 545 /* If the lock is recursed, then unrecurse one level. */ 546 if (sx_xlocked(sx) && sx_recursed(sx)) { 547 if ((--sx->sx_recurse) == 0) 548 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 549 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 550 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx); 551 return; 552 } 553 MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS | 554 SX_LOCK_EXCLUSIVE_WAITERS)); 555 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 556 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx); 557 558 sleepq_lock(&sx->lock_object); 559 x = SX_LOCK_UNLOCKED; 560 561 /* 562 * The wake up algorithm here is quite simple and probably not 563 * ideal. It gives precedence to shared waiters if they are 564 * present. For this condition, we have to preserve the 565 * state of the exclusive waiters flag. 566 */ 567 if (sx->sx_lock & SX_LOCK_SHARED_WAITERS) { 568 queue = SQ_SHARED_QUEUE; 569 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS); 570 } else 571 queue = SQ_EXCLUSIVE_QUEUE; 572 573 /* Wake up all the waiters for the specific queue. */ 574 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 575 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue", 576 __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" : 577 "exclusive"); 578 atomic_store_rel_ptr(&sx->sx_lock, x); 579 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, queue); 580} 581 582/* 583 * This function represents the so-called 'hard case' for sx_slock 584 * operation. All 'easy case' failures are redirected to this. Note 585 * that ideally this would be a static function, but it needs to be 586 * accessible from at least sx.h. 587 */ 588void 589_sx_slock_hard(struct sx *sx, const char *file, int line) 590{ 591 GIANT_DECLARE; 592#ifdef ADAPTIVE_SX 593 volatile struct thread *owner; 594#endif 595 uintptr_t x; 596 uint64_t waitstart = 0; 597 int contested = 0; 598 /* 599 * As with rwlocks, we don't make any attempt to try to block 600 * shared locks once there is an exclusive waiter. 601 */ 602 603 for (;;) { 604 x = sx->sx_lock; 605 606 /* 607 * If no other thread has an exclusive lock then try to bump up 608 * the count of sharers. Since we have to preserve the state 609 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the 610 * shared lock loop back and retry. 611 */ 612 if (x & SX_LOCK_SHARED) { 613 MPASS(!(x & SX_LOCK_SHARED_WAITERS)); 614 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, 615 x + SX_ONE_SHARER)) { 616 if (SX_SHARERS(x) == 0) 617 lock_profile_obtain_lock_success( 618 &sx->lock_object, contested, 619 waitstart, file, line); 620 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 621 CTR4(KTR_LOCK, 622 "%s: %p succeed %p -> %p", __func__, 623 sx, (void *)x, 624 (void *)(x + SX_ONE_SHARER)); 625 break; 626 } 627 lock_profile_obtain_lock_failed(&sx->lock_object, &contested, 628 &waitstart); 629 630 continue; 631 } 632 633#ifdef ADAPTIVE_SX 634 /* 635 * If the owner is running on another CPU, spin until 636 * the owner stops running or the state of the lock 637 * changes. 
638 */ 639 else if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) { 640 x = SX_OWNER(x); 641 owner = (struct thread *)x; 642 if (TD_IS_RUNNING(owner)) { 643 lock_profile_obtain_lock_failed(&sx->lock_object, &contested, 644 &waitstart); 645 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 646 CTR3(KTR_LOCK, 647 "%s: spinning on %p held by %p", 648 __func__, sx, owner); 649 GIANT_SAVE(); 650 while (SX_OWNER(sx->sx_lock) == x && 651 TD_IS_RUNNING(owner)) 652 cpu_spinwait(); 653 continue; 654 } 655 } 656#endif 657 else 658 lock_profile_obtain_lock_failed(&sx->lock_object, &contested, 659 &waitstart); 660 661 /* 662 * Some other thread already has an exclusive lock, so 663 * start the process of blocking. 664 */ 665 sleepq_lock(&sx->lock_object); 666 x = sx->sx_lock; 667 668 /* 669 * The lock could have been released while we spun. 670 * In this case loop back and retry. 671 */ 672 if (x & SX_LOCK_SHARED) { 673 sleepq_release(&sx->lock_object); 674 continue; 675 } 676 677#ifdef ADAPTIVE_SX 678 /* 679 * If the owner is running on another CPU, spin until 680 * the owner stops running or the state of the lock 681 * changes. 682 */ 683 if (!(x & SX_LOCK_SHARED) && 684 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) { 685 owner = (struct thread *)SX_OWNER(x); 686 if (TD_IS_RUNNING(owner)) { 687 sleepq_release(&sx->lock_object); 688 continue; 689 } 690 } 691#endif 692 693 /* 694 * Try to set the SX_LOCK_SHARED_WAITERS flag. If we 695 * fail to set it drop the sleep queue lock and loop 696 * back. 697 */ 698 if (!(x & SX_LOCK_SHARED_WAITERS)) { 699 if (!atomic_cmpset_ptr(&sx->sx_lock, x, 700 x | SX_LOCK_SHARED_WAITERS)) { 701 sleepq_release(&sx->lock_object); 702 continue; 703 } 704 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 705 CTR2(KTR_LOCK, "%s: %p set shared waiters flag", 706 __func__, sx); 707 } 708 709 /* 710 * Since we have been unable to acquire the shared lock, 711 * we have to sleep. 712 */ 713 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 714 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", 715 __func__, sx); 716 717 GIANT_SAVE(); 718 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, 719 SLEEPQ_SX, SQ_SHARED_QUEUE); 720 sleepq_wait(&sx->lock_object); 721 722 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 723 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", 724 __func__, sx); 725 } 726 727 GIANT_RESTORE(); 728} 729 730/* 731 * This function represents the so-called 'hard case' for sx_sunlock 732 * operation. All 'easy case' failures are redirected to this. Note 733 * that ideally this would be a static function, but it needs to be 734 * accessible from at least sx.h. 735 */ 736void 737_sx_sunlock_hard(struct sx *sx, const char *file, int line) 738{ 739 uintptr_t x; 740 741 for (;;) { 742 x = sx->sx_lock; 743 744 /* 745 * We should never have sharers while at least one thread 746 * holds a shared lock. 747 */ 748 KASSERT(!(x & SX_LOCK_SHARED_WAITERS), 749 ("%s: waiting sharers", __func__)); 750 751 /* 752 * See if there is more than one shared lock held. If 753 * so, just drop one and return. 754 */ 755 if (SX_SHARERS(x) > 1) { 756 if (atomic_cmpset_ptr(&sx->sx_lock, x, 757 x - SX_ONE_SHARER)) { 758 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 759 CTR4(KTR_LOCK, 760 "%s: %p succeeded %p -> %p", 761 __func__, sx, (void *)x, 762 (void *)(x - SX_ONE_SHARER)); 763 break; 764 } 765 continue; 766 } 767 768 /* 769 * If there aren't any waiters for an exclusive lock, 770 * then try to drop it quickly. 
771 */ 772 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) { 773 MPASS(x == SX_SHARERS_LOCK(1)); 774 if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1), 775 SX_LOCK_UNLOCKED)) { 776 lock_profile_release_lock(&sx->lock_object); 777 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 778 CTR2(KTR_LOCK, "%s: %p last succeeded", 779 __func__, sx); 780 break; 781 } 782 continue; 783 } 784 785 /* 786 * At this point, there should just be one sharer with 787 * exclusive waiters. 788 */ 789 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS)); 790 791 lock_profile_release_lock(&sx->lock_object); 792 sleepq_lock(&sx->lock_object); 793 794 /* 795 * Wake up semantic here is quite simple: 796 * Just wake up all the exclusive waiters. 797 * Note that the state of the lock could have changed, 798 * so if it fails loop back and retry. 799 */ 800 if (!atomic_cmpset_ptr(&sx->sx_lock, 801 SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS, 802 SX_LOCK_UNLOCKED)) { 803 sleepq_release(&sx->lock_object); 804 continue; 805 } 806 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 807 CTR2(KTR_LOCK, "%s: %p waking up all thread on" 808 "exclusive queue", __func__, sx); 809 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, 810 SQ_EXCLUSIVE_QUEUE); 811 break; 812 } 813} 814 815#ifdef INVARIANT_SUPPORT 816#ifndef INVARIANTS 817#undef _sx_assert 818#endif 819 820/* 821 * In the non-WITNESS case, sx_assert() can only detect that at least 822 * *some* thread owns an slock, but it cannot guarantee that *this* 823 * thread owns an slock. 824 */ 825void 826_sx_assert(struct sx *sx, int what, const char *file, int line) 827{ 828#ifndef WITNESS 829 int slocked = 0; 830#endif 831 832 if (panicstr != NULL) 833 return; 834 switch (what) { 835 case SX_SLOCKED: 836 case SX_SLOCKED | SX_NOTRECURSED: 837 case SX_SLOCKED | SX_RECURSED: 838#ifndef WITNESS 839 slocked = 1; 840 /* FALLTHROUGH */ 841#endif 842 case SX_LOCKED: 843 case SX_LOCKED | SX_NOTRECURSED: 844 case SX_LOCKED | SX_RECURSED: 845#ifdef WITNESS 846 witness_assert(&sx->lock_object, what, file, line); 847#else 848 /* 849 * If some other thread has an exclusive lock or we 850 * have one and are asserting a shared lock, fail. 851 * Also, if no one has a lock at all, fail. 852 */ 853 if (sx->sx_lock == SX_LOCK_UNLOCKED || 854 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked || 855 sx_xholder(sx) != curthread))) 856 panic("Lock %s not %slocked @ %s:%d\n", 857 sx->lock_object.lo_name, slocked ? "share " : "", 858 file, line); 859 860 if (!(sx->sx_lock & SX_LOCK_SHARED)) { 861 if (sx_recursed(sx)) { 862 if (what & SX_NOTRECURSED) 863 panic("Lock %s recursed @ %s:%d\n", 864 sx->lock_object.lo_name, file, 865 line); 866 } else if (what & SX_RECURSED) 867 panic("Lock %s not recursed @ %s:%d\n", 868 sx->lock_object.lo_name, file, line); 869 } 870#endif 871 break; 872 case SX_XLOCKED: 873 case SX_XLOCKED | SX_NOTRECURSED: 874 case SX_XLOCKED | SX_RECURSED: 875 if (sx_xholder(sx) != curthread) 876 panic("Lock %s not exclusively locked @ %s:%d\n", 877 sx->lock_object.lo_name, file, line); 878 if (sx_recursed(sx)) { 879 if (what & SX_NOTRECURSED) 880 panic("Lock %s recursed @ %s:%d\n", 881 sx->lock_object.lo_name, file, line); 882 } else if (what & SX_RECURSED) 883 panic("Lock %s not recursed @ %s:%d\n", 884 sx->lock_object.lo_name, file, line); 885 break; 886 case SX_UNLOCKED: 887#ifdef WITNESS 888 witness_assert(&sx->lock_object, what, file, line); 889#else 890 /* 891 * If we hold an exclusve lock fail. We can't 892 * reliably check to see if we hold a shared lock or 893 * not. 
894 */ 895 if (sx_xholder(sx) == curthread) 896 panic("Lock %s exclusively locked @ %s:%d\n", 897 sx->lock_object.lo_name, file, line); 898#endif 899 break; 900 default: 901 panic("Unknown sx lock assertion: %d @ %s:%d", what, file, 902 line); 903 } 904} 905#endif /* INVARIANT_SUPPORT */ 906 907#ifdef DDB 908static void 909db_show_sx(struct lock_object *lock) 910{ 911 struct thread *td; 912 struct sx *sx; 913 914 sx = (struct sx *)lock; 915 916 db_printf(" state: "); 917 if (sx->sx_lock == SX_LOCK_UNLOCKED) 918 db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
920 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); 921 else { 922 td = sx_xholder(sx); 923 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 924 td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm); 925 if (sx_recursed(sx)) 926 db_printf(" recursed: %d\n", sx->sx_recurse); 927 } 928 929 db_printf(" waiters: "); 930 switch(sx->sx_lock & 931 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) { 932 case SX_LOCK_SHARED_WAITERS: 933 db_printf("shared\n"); 934 break; 935 case SX_LOCK_EXCLUSIVE_WAITERS: 936 db_printf("exclusive\n"); 937 break; 938 case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS: 939 db_printf("exclusive and shared\n"); 940 break; 941 default: 942 db_printf("none\n"); 943 } 944} 945 946/* 947 * Check to see if a thread that is blocked on a sleep queue is actually 948 * blocked on an sx lock. If so, output some details and return true. 949 * If the lock has an exclusive owner, return that in *ownerp. 950 */ 951int 952sx_chain(struct thread *td, struct thread **ownerp) 953{ 954 struct sx *sx; 955 956 /* 957 * Check to see if this thread is blocked on an sx lock. 958 * First, we check the lock class. If that is ok, then we 959 * compare the lock name against the wait message. 960 */ 961 sx = td->td_wchan; 962 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx || 963 sx->lock_object.lo_name != td->td_wmesg) 964 return (0); 965 966 /* We think we have an sx lock, so output some details. */ 967 db_printf("blocked on sx \"%s\" ", td->td_wmesg); 968 *ownerp = sx_xholder(sx); 969 if (sx->sx_lock & SX_LOCK_SHARED) 970 db_printf("SLOCK (count %ju)\n", 971 (uintmax_t)SX_SHARERS(sx->sx_lock)); 972 else 973 db_printf("XLOCK\n"); 974 return (1); 975} 976#endif
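/*
 * A minimal consumer-side usage sketch of the sx(9) interface implemented
 * above.  The "foo" structure, its fields, and the variable "f" below are
 * hypothetical and serve only as an illustration: readers take the shared
 * lock, writers take the exclusive lock, and the lock is destroyed when the
 * containing object goes away.
 *
 *	struct foo {
 *		struct sx	foo_lock;
 *		int		foo_count;
 *	} *f;
 *
 *	sx_init(&f->foo_lock, "foo lock");
 *
 *	sx_slock(&f->foo_lock);
 *	... read f->foo_count ...
 *	sx_sunlock(&f->foo_lock);
 *
 *	sx_xlock(&f->foo_lock);
 *	f->foo_count++;
 *	sx_xunlock(&f->foo_lock);
 *
 *	sx_destroy(&f->foo_lock);
 */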