kern_lock.c revision 72227
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: head/sys/kern/kern_lock.c 72227 2001-02-09 16:27:41Z jhb $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
extern int lock_nmtx;
int lock_mtx_selector;
struct mtx *lock_mtx_array;
static struct mtx lock_mtx;

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
	int i;

	/*
	 * Initialize the lockmgr protection mutex if it hasn't already been
	 * done.  Unless something changes about kernel startup order, VM
	 * initialization will always cause this mutex to already be
	 * initialized in a call to lockinit().
	 */
	if (lock_mtx_selector == 0)
		mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
	else {
		/*
		 * This is necessary if (lock_nmtx == 1) and doesn't hurt
		 * otherwise.
		 */
		lock_mtx_selector = 0;
	}

	lock_mtx_array = (struct mtx *)malloc(sizeof(struct mtx) * lock_nmtx,
	    M_CACHE, M_WAITOK);
	for (i = 0; i < lock_nmtx; i++)
		mtx_init(&lock_mtx_array[i], "lockmgr interlock", MTX_DEF);
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}
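
/*
 * Illustrative note (not part of the original file): sharelock() and
 * shareunlock() maintain the invariant that LK_SHARE_NONZERO is set
 * exactly when lk_sharecount > 0, which lets acquire() below wait on a
 * single flag word rather than re-reading the count.  With the
 * interlock held, two shared references come and go like this:
 *
 *	sharelock(lkp, 1);	count 0 -> 1, LK_SHARE_NONZERO set
 *	sharelock(lkp, 1);	count 1 -> 2, flag unchanged
 *	shareunlock(lkp, 1);	count 2 -> 1, flag unchanged
 *	shareunlock(lkp, 1);	count 1 -> 0, flag cleared; a waiter
 *				blocked for LK_WANT_EXCL or
 *				LK_WANT_UPGRADE is woken via wakeup(lkp)
 */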

/*
 * This is the waitloop optimization: on MP systems, briefly spin waiting
 * for the flags to clear before falling back to a full sleep.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		mtx_unlock(lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		mtx_lock(lkp->lk_interlock);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

static int
acquire(struct lock *lkp, int extflags, int wanted)
{
	int s, error;

	CTR3(KTR_LOCKMGR,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg, lkp->lk_timo);
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
	}
	splx(s);
	return 0;
}
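
/*
 * A note on the sleep protocol in acquire() (a sketch, not original
 * commentary): the lost-wakeup race is avoided because msleep() only
 * drops lk_interlock once the process is on the sleep queue, and every
 * path that clears a waited-on bit (e.g. shareunlock() or the release
 * paths in lockmgr()) calls wakeup() while still holding that same
 * interlock, so the flag test and the sleep are effectively atomic:
 *
 *	waiter (interlock held)		releaser (interlock held)
 *	while (lk_flags & wanted)	lk_flags &= ~bits;
 *		msleep(lkp, ...);	wakeup(lkp);
 */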

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, p)
#else
debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct proc *p;
#ifdef DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	pid_t pid;
	int extflags, lockflags;

	CTR5(KTR_LOCKMGR,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "interlkp == %p, p == %p", lkp, lkp->lk_wmesg, flags, interlkp, p);

	error = 0;
	if (p == NULL)
		pid = LK_KERNPROC;
	else
		pid = p->p_pid;

	mtx_lock(lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		mtx_unlock(interlkp);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if P_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != pid) {
			lockflags = LK_HAVE_EXCL;
			if (p) {
				PROC_LOCK(p);
				if ((p->p_flag & P_DEADLKTREAT) == 0) {
					lockflags |= LK_WANT_EXCL |
					    LK_WANT_UPGRADE;
				}
				PROC_UNLOCK(p);
			}
			error = acquire(lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(lkp, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;
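
	/*
	 * Worked example for LK_DOWNGRADE (illustrative, not original
	 * commentary): a process that holds the lock exclusively twice
	 * via LK_CANRECURSE recursion and then calls
	 * lockmgr(lkp, LK_DOWNGRADE, NULL, p) ends up with
	 * lk_exclusivecount == 0 and lk_sharecount == 2; each recursive
	 * exclusive reference becomes one shared reference, and blocked
	 * shared lockers are admitted by the wakeup() above.
	 */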

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for
			 * the shared count to drop to zero, then take
			 * the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;
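
	/*
	 * Timeline sketch for the two-stage exclusive path above
	 * (illustrative; T1 and T2 are hypothetical processes, neither
	 * with P_DEADLKTREAT set):
	 *
	 *	T1: LK_EXCLUSIVE request	sets LK_WANT_EXCL
	 *	T2: LK_SHARED request		blocks: LK_WANT_EXCL is in
	 *					T2's "wanted" mask
	 *	T1: waits for LK_WANT_UPGRADE | LK_SHARE_NONZERO to clear,
	 *	    then sets LK_HAVE_EXCL
	 *	T2: admitted only after T1's LK_RELEASE
	 *
	 * Setting LK_WANT_EXCL before draining existing readers is what
	 * keeps a steady stream of new shared requests from starving
	 * exclusive ones.
	 */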

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != pid &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg, lkp->lk_timo);
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	if (lock_mtx_array != NULL) {
		mtx_lock(&lock_mtx);
		lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
		lock_mtx_selector++;
		if (lock_mtx_selector == lock_nmtx)
			lock_mtx_selector = 0;
		mtx_unlock(&lock_mtx);
	} else {
		/*
		 * Giving lockmgr locks that are initialized during boot a
		 * pointer to the internal lockmgr mutex is safe, since the
		 * lockmgr code itself doesn't call lockinit() (which could
		 * cause mutex recursion).
		 */
		if (lock_mtx_selector == 0) {
			/*
			 * This case only happens during kernel bootstrapping,
			 * so there's no reason to protect modification of
			 * lock_mtx_selector or lock_mtx.
			 */
			mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
			lock_mtx_selector = 1;
		}
		lkp->lk_interlock = &lock_mtx;
	}
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCKMGR, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, p)
	struct lock *lkp;
	struct proc *p;
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (p == NULL || lkp->lk_lockholder == p->p_pid)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
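
/*
 * Example use of lockstatus() (hypothetical caller; "examplelk" is not
 * a real lock in this file):
 *
 *	if (lockstatus(&examplelk, curproc) != LK_EXCLUSIVE)
 *		panic("example: lock not held exclusively by curproc");
 */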

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
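
/*
 * End-to-end usage sketch (illustrative only; "examplelk" and the
 * calling context are made up, and error handling is elided):
 *
 *	struct lock examplelk;
 *
 *	lockinit(&examplelk, PVFS, "example", 0, 0);
 *	...
 *	if (lockmgr(&examplelk, LK_SHARED, NULL, curproc) == 0) {
 *		... read the protected data ...
 *		lockmgr(&examplelk, LK_RELEASE, NULL, curproc);
 *	}
 *	...
 *	lockdestroy(&examplelk);
 */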