kern_lock.c revision 58132
1/* 2 * Copyright (c) 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Copyright (C) 1997 6 * John S. Dyson. All rights reserved. 7 * 8 * This code contains ideas from software contributed to Berkeley by 9 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating 10 * System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: head/sys/kern/kern_lock.c 58132 2000-03-16 08:51:55Z phk $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

/*
 * COUNT() maintains a per-process count of held locks for debugging;
 * it compiles to nothing unless SIMPLELOCK_DEBUG is defined.
 */
#ifdef SIMPLELOCK_DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

/* Tuning knobs for the adaptive spin in apause() (used on SMP only). */
#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

/*
 * Keep the small helpers out of line under DIAGNOSTIC so they appear
 * in tracebacks; otherwise let the compiler inline them.
 */
#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

/* All state bits that must clear before a LK_DRAIN request can succeed. */
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags) ;

/*
 * Bump the shared-holder count by incr and keep the LK_SHARE_NONZERO
 * summary flag in sync.  Must be called with the interlock held.
 */
static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

/*
 * Drop the shared-holder count by decr.  When the count reaches zero,
 * clear LK_SHARE_NONZERO and wake anyone waiting to take or upgrade to
 * an exclusive lock.  Must be called with the interlock held.
 */
static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		/* Wake would-be exclusive holders now that sharers are gone. */
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * This is the waitloop optimization, and note for this to work
 * simple_lock and simple_unlock should be subroutines to avoid
 * optimization troubles.
 *
 * Briefly spin (SMP only) waiting for all of the bits in 'flags' to
 * clear, re-sampling under the interlock.  Returns 0 if they cleared,
 * 1 if the caller should sleep instead.  Called with the interlock
 * held; may drop and re-take it.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		/* Drop the interlock so the current holder can progress. */
		simple_unlock(&lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		simple_lock(&lkp->lk_interlock);
		/* Only a sample taken with the interlock held is decisive. */
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

/*
 * Sleep until none of the bits in 'wanted' remain set in lk_flags.
 * Returns 0 on success, EBUSY for a failed poll (LK_NOWAIT), ENOLCK
 * when LK_SLEEPFAIL is set and we actually slept, or the error from
 * tsleep() (e.g. EWOULDBLOCK on timeout).  Called with the interlock
 * held; drops it across the sleep and re-takes it before returning.
 */
static int
acquire(struct lock *lkp, int extflags, int wanted) {
	int s, error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	/* Try the adaptive spin first unless LK_NOPAUSE forbids it. */
	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		if (lkp->lk_waitcount == 1) {
			/* Last waiter out clears the summary flag too. */
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			/*
			 * Caller asked to fail after any sleep, even a
			 * successful one, so it can revalidate its state.
			 */
			splx(s);
			return ENOLCK;
		}
	}
	splx(s);
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, p)
#else
debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;		/* LK_TYPE_MASK op plus LK_* modifier flags */
	struct simplelock *interlkp;	/* released if LK_INTERLOCK is set */
	struct proc *p;		/* requesting process, or NULL for the kernel */
#ifdef DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p == NULL)
		pid = LK_KERNPROC;
	else
		pid = p->p_pid;

	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);

	/* External flags may come from the caller or be latched in the lock. */
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if P_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests ( but not the exclusive
		 * lock itself ).
		 */
		if (lkp->lk_lockholder != pid) {
			if (p && (p->p_flag & P_DEADLKTREAT)) {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL
					);
			} else {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL | LK_WANT_EXCL |
					    LK_WANT_UPGRADE
					);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		/* Convert the whole exclusive recursion depth to shares. */
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 *	Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(p, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 * LK_WANT_EXCL is cleared again whether we succeed or not.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			/* Only the holder (or the kernel) may release. */
			if (lkp->lk_lockholder != pid &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			/* Kernel-owned locks are not COUNT()ed per process. */
			if (lkp->lk_lockholder != LK_KERNPROC) {
				COUNT(p, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	/* If the lock has gone fully idle, wake any waiting drainer. */
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}

/*
 * Wait for every LK_ALL state bit to clear so a LK_DRAIN request can
 * take the lock.  Sleeps on &lkp->lk_flags (a distinct channel from
 * the one acquire() uses) with LK_WAITDRAIN set.  Returns 0, EBUSY
 * (failed LK_NOWAIT poll), ENOLCK (LK_SLEEPFAIL after a sleep), or
 * the tsleep() error.  Called with the interlock held.
 */
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(&lkp->lk_flags, lkp->lk_prio,
			lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Initialize a lock; required before use.
473 */ 474void 475lockinit(lkp, prio, wmesg, timo, flags) 476 struct lock *lkp; 477 int prio; 478 char *wmesg; 479 int timo; 480 int flags; 481{ 482 483 simple_lock_init(&lkp->lk_interlock); 484 lkp->lk_flags = (flags & LK_EXTFLG_MASK); 485 lkp->lk_sharecount = 0; 486 lkp->lk_waitcount = 0; 487 lkp->lk_exclusivecount = 0; 488 lkp->lk_prio = prio; 489 lkp->lk_wmesg = wmesg; 490 lkp->lk_timo = timo; 491 lkp->lk_lockholder = LK_NOPROC; 492} 493 494/* 495 * Determine the status of a lock. 496 */ 497int 498lockstatus(lkp, p) 499 struct lock *lkp; 500 struct proc *p; 501{ 502 int lock_type = 0; 503 504 simple_lock(&lkp->lk_interlock); 505 if (lkp->lk_exclusivecount != 0) { 506 if (p == NULL || lkp->lk_lockholder == p->p_pid) 507 lock_type = LK_EXCLUSIVE; 508 else 509 lock_type = LK_EXCLOTHER; 510 } else if (lkp->lk_sharecount != 0) 511 lock_type = LK_SHARED; 512 simple_unlock(&lkp->lk_interlock); 513 return (lock_type); 514} 515 516/* 517 * Determine the number of holders of a lock. 518 */ 519int 520lockcount(lkp) 521 struct lock *lkp; 522{ 523 int count; 524 525 simple_lock(&lkp->lk_interlock); 526 count = lkp->lk_exclusivecount + lkp->lk_sharecount; 527 simple_unlock(&lkp->lk_interlock); 528 return (count); 529} 530 531/* 532 * Print out information about state of a lock. Used by VOP_PRINT 533 * routines to display status about contained locks. 
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

/*
 * Debugging versions of the simple-lock primitives.  Only valid on a
 * uniprocessor (NCPUS == 1) or when merely lint-compiling.
 */
#if defined(SIMPLELOCK_DEBUG) && (NCPUS == 1 || defined(COMPILING_LINT))
#include <sys/kernel.h>
#include <sys/sysctl.h>

/*
 * debug.lockpausetime sysctl controls what happens on a detected
 * error: -1 panics, 1 drops into the debugger, >1 sleeps that many
 * seconds, 0 just prints a message.
 */
static int lockpausetime = 0;
SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");

static int simplelockrecurse;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

/*
 * Take a simple lock; id/l identify the calling file and line for
 * diagnostics.  Complains (per lockpausetime) if the lock is already
 * held, then takes it anyway and bumps curproc's held-lock count.
 */
void
_simple_lock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		printf("%s:%d: simple_lock: lock held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_lock");
			/*BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
}

/*
 * Try to take a simple lock; returns 1 on success, 0 if already held.
 */
int
_simple_lock_try(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

/*
 * Release a simple lock.  Complains (per lockpausetime) if the lock
 * is not held, then clears it anyway and decrements curproc's
 * held-lock count.
 */
void
_simple_unlock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		printf("%s:%d: simple_unlock: lock not held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_unlock");
			/* BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_unlock: lock not held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	if (curproc)
		curproc->p_simple_locks--;
}
#elif defined(SIMPLELOCK_DEBUG)
#error "SIMPLELOCK_DEBUG is not compatible with SMP!"
#endif /* SIMPLELOCK_DEBUG && NCPUS == 1 */