kern_lock.c revision 75472
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: head/sys/kern/kern_lock.c 75472 2001-04-13 10:15:53Z alfred $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
        LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
extern int lock_nmtx;
int lock_mtx_selector;
struct mtx *lock_mtx_array;
static struct mtx lock_mtx;

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
        int i;

        /*
         * Initialize the lockmgr protection mutex if it hasn't already been
         * done.  Unless something changes about kernel startup order, VM
         * initialization will always cause this mutex to already be
         * initialized in a call to lockinit().
         */
        if (lock_mtx_selector == 0)
                mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
        else {
                /*
                 * This is necessary if (lock_nmtx == 1) and doesn't hurt
                 * otherwise.
                 */
                lock_mtx_selector = 0;
        }

        lock_mtx_array = (struct mtx *)malloc(sizeof(struct mtx) * lock_nmtx,
            M_CACHE, M_WAITOK);
        for (i = 0; i < lock_nmtx; i++)
                mtx_init(&lock_mtx_array[i], "lockmgr interlock", MTX_DEF);
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)
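
/*
 * Illustrative sketch (not part of the original file): lockinit() below
 * hands out interlocks from lock_mtx_array round-robin, which is
 * equivalent to:
 *
 *	lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
 *	lock_mtx_selector = (lock_mtx_selector + 1) % lock_nmtx;
 *
 * so unrelated lockmgr locks may share an interlock, trading some
 * contention for a smaller struct lock.
 */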

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        wakeup(lkp);
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
}

/*
 * The waitloop optimization: on SMP, briefly spin in the hope that the
 * wanted flags clear before we commit to sleeping.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
        int i, lock_wait;
#endif

        if ((lkp->lk_flags & flags) == 0)
                return 0;
#ifdef SMP
        for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
                mtx_unlock(lkp->lk_interlock);
                for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
                        if ((lkp->lk_flags & flags) == 0)
                                break;
                mtx_lock(lkp->lk_interlock);
                if ((lkp->lk_flags & flags) == 0)
                        return 0;
        }
#endif
        return 1;
}

static int
acquire(struct lock *lkp, int extflags, int wanted) {
        int s, error;

        CTR3(KTR_LOCKMGR,
            "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
            lkp, extflags, wanted);

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return EBUSY;
        }

        if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
                error = apause(lkp, wanted);
                if (error == 0)
                        return 0;
        }

        s = splhigh();
        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;
                error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
                    lkp->lk_wmesg, lkp->lk_timo);
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error) {
                        splx(s);
                        return error;
                }
                if (extflags & LK_SLEEPFAIL) {
                        splx(s);
                        return ENOLCK;
                }
        }
        splx(s);
        return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
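 *
 * Example (illustrative sketch, not part of the original comment): a
 * typical caller brackets its critical section like this, assuming a
 * lock "lk" already initialized with lockinit() and a process pointer
 * "p" supplied by the caller:
 *
 *	if (lockmgr(&lk, LK_EXCLUSIVE, NULL, p) == 0) {
 *		... exclusive critical section ...
 *		lockmgr(&lk, LK_RELEASE, NULL, p);
 *	}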
 */
int
#ifndef DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, p)
#else
debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
        struct lock *lkp;
        u_int flags;
        struct mtx *interlkp;
        struct proc *p;
#ifdef DEBUG_LOCKS
        const char *name;       /* Name of lock function */
        const char *file;       /* Name of file call is from */
        int line;               /* Line number in file */
#endif
{
        int error;
        pid_t pid;
        int extflags, lockflags;

        CTR5(KTR_LOCKMGR,
            "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
            "interlkp == %p, p == %p", lkp, lkp->lk_wmesg, flags, interlkp, p);

        error = 0;
        if (p == NULL)
                pid = LK_KERNPROC;
        else
                pid = p->p_pid;

        mtx_lock(lkp->lk_interlock);
        if (flags & LK_INTERLOCK)
                mtx_unlock(interlkp);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

        switch (flags & LK_TYPE_MASK) {

        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if P_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests (but not the exclusive
                 * lock itself).
                 */
                if (lkp->lk_lockholder != pid) {
                        lockflags = LK_HAVE_EXCL;
                        if (p) {
                                PROC_LOCK(p);
                                if (!(p->p_flag & P_DEADLKTREAT)) {
                                        lockflags |= LK_WANT_EXCL |
                                            LK_WANT_UPGRADE;
                                }
                                PROC_UNLOCK(p);
                        }
                        error = acquire(lkp, extflags, lockflags);
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        break;
                }
                /*
                 * We hold an exclusive lock, so downgrade it to shared.
                 * An alternative would be to fail with EDEADLK.
                 */
                sharelock(lkp, 1);
                /* fall into downgrade */

        case LK_DOWNGRADE:
                KASSERT(lkp->lk_lockholder == pid && lkp->lk_exclusivecount != 0,
                    ("lockmgr: not holding exclusive lock "
                    "(owner pid (%d) != pid (%d), exclcnt (%d) != 0)",
                    lkp->lk_lockholder, pid, lkp->lk_exclusivecount));
                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOPROC;
                if (lkp->lk_waitcount)
                        wakeup((void *)lkp);
                break;
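
        /*
         * Example (illustrative sketch, not part of the original file):
         * upgrading a shared hold to an exclusive one; if the upgrade
         * fails, the shared hold has already been dropped.  Assuming
         * "lk" and "p" as in the example above:
         *
         *	lockmgr(&lk, LK_SHARED, NULL, p);
         *	...
         *	if (lockmgr(&lk, LK_UPGRADE, NULL, p) != 0)
         *		...the shared hold is gone; reacquire if needed...
         */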

        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        shareunlock(lkp, 1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade).  If we return an error, the file
                 * will always be unlocked.
                 */
                if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
                        panic("lockmgr: upgrade exclusive lock");
                shareunlock(lkp, 1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                    lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an
                         * upgrade, so request the upgrade and wait for
                         * the shared count to drop to zero, then take
                         * the exclusive lock.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags, LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = pid;
                        if (lkp->lk_exclusivecount != 0)
                                panic("lockmgr: non-zero exclusive count");
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;
#endif
                        break;
                }
                /*
                 * Someone else has requested an upgrade.  Release our shared
                 * lock, awaken the upgrade requestor if we are the last
                 * shared lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO)
                        wakeup((void *)lkp);
                /* fall into exclusive request */

        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
                        /*
                         * Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
                                panic("lockmgr: locking against myself");
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                break;
                        }
                }
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
                    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }
                /*
                 * Try to acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;
                /*
                 * Wait for shared locks and upgrades to finish.
                 */
                error = acquire(lkp, extflags,
                    LK_WANT_UPGRADE | LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = pid;
                if (lkp->lk_exclusivecount != 0)
                        panic("lockmgr: non-zero exclusive count");
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;
#endif
                break;

        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != pid &&
                            lkp->lk_lockholder != LK_KERNPROC) {
                                panic("lockmgr: pid %d, not %s %d unlocking",
                                    pid, "exclusive lock holder",
                                    lkp->lk_lockholder);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOPROC;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
                } else if (lkp->lk_flags & LK_SHARE_NONZERO)
                        shareunlock(lkp, 1);
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        wakeup((void *)lkp);
                break;
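
        /*
         * Example (illustrative sketch, not part of the original file):
         * LK_DRAIN is typically the prelude to tearing a lock down,
         * assuming "lk" and "p" as in the examples above:
         *
         *	lockmgr(&lk, LK_DRAIN, NULL, p);
         *	lockmgr(&lk, LK_RELEASE, NULL, p);
         *	lockdestroy(&lk);
         */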

        case LK_DRAIN:
                /*
                 * Check that we do not already hold the lock, as it can
                 * never drain if we do.  Unfortunately, we have no way to
                 * check for holding a shared lock, but at least we can
                 * check for an exclusive one.
                 */
                if (lkp->lk_lockholder == pid)
                        panic("lockmgr: draining against myself");

                error = acquiredrain(lkp, extflags);
                if (error)
                        break;
                lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
                lkp->lk_lockholder = pid;
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;
#endif
                break;

        default:
                mtx_unlock(lkp->lk_interlock);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        if ((lkp->lk_flags & LK_WAITDRAIN) &&
            (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
            LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
                lkp->lk_flags &= ~LK_WAITDRAIN;
                wakeup((void *)&lkp->lk_flags);
        }
        mtx_unlock(lkp->lk_interlock);
        return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags) {
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
                return EBUSY;
        }

        error = apause(lkp, LK_ALL);
        if (error == 0)
                return 0;

        while (lkp->lk_flags & LK_ALL) {
                lkp->lk_flags |= LK_WAITDRAIN;
                error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
                    lkp->lk_wmesg, lkp->lk_timo);
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL) {
                        return ENOLCK;
                }
        }
        return 0;
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
        struct lock *lkp;
        int prio;
        char *wmesg;
        int timo;
        int flags;
{
        CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
            "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

        if (lock_mtx_array != NULL) {
                mtx_lock(&lock_mtx);
                lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
                lock_mtx_selector++;
                if (lock_mtx_selector == lock_nmtx)
                        lock_mtx_selector = 0;
                mtx_unlock(&lock_mtx);
        } else {
                /*
                 * Giving lockmgr locks that are initialized during boot a
                 * pointer to the internal lockmgr mutex is safe, since the
                 * lockmgr code itself doesn't call lockinit() (which could
                 * cause mutex recursion).
                 */
                if (lock_mtx_selector == 0) {
                        /*
                         * This case only happens during kernel bootstrapping,
                         * so there's no reason to protect modification of
                         * lock_mtx_selector or lock_mtx.
                         */
                        mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
                        lock_mtx_selector = 1;
                }
                lkp->lk_interlock = &lock_mtx;
        }
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_prio = prio;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
        struct lock *lkp;
{
        CTR2(KTR_LOCKMGR, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
            lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, p)
        struct lock *lkp;
        struct proc *p;
{
        int lock_type = 0;

        mtx_lock(lkp->lk_interlock);
        if (lkp->lk_exclusivecount != 0) {
                if (p == NULL || lkp->lk_lockholder == p->p_pid)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0)
                lock_type = LK_SHARED;
        mtx_unlock(lkp->lk_interlock);
        return (lock_type);
}
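
/*
 * Example (illustrative sketch, not part of the original file): callers
 * can use lockstatus() to assert ownership, assuming "lk" and "p" as in
 * the examples above:
 *
 *	if (lockstatus(&lk, p) != LK_EXCLUSIVE)
 *		panic("lock not exclusively held by this process");
 */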

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
        struct lock *lkp;
{
        int count;

        mtx_lock(lkp->lk_interlock);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        mtx_unlock(lkp->lk_interlock);
        return (count);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
        struct lock *lkp;
{

        if (lkp->lk_sharecount)
                printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                    lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                printf(" lock type %s: EXCL (count %d) by pid %d",
                    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
        if (lkp->lk_waitcount > 0)
                printf(" with %d pending", lkp->lk_waitcount);
}
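
/*
 * Example (illustrative sketch, not part of the original file): lockcount()
 * provides a simple busy check, assuming "lk" as in the examples above:
 *
 *	if (lockcount(&lk) != 0)
 *		printf("lock is still held\n");
 */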