/* kern_lock.c — revision 27894 */
1251881Speter/* 2251881Speter * Copyright (c) 1995 3251881Speter * The Regents of the University of California. All rights reserved. 4251881Speter * 5251881Speter * This code contains ideas from software contributed to Berkeley by 6251881Speter * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating 7251881Speter * System project at Carnegie-Mellon University. 8251881Speter * 9251881Speter * Redistribution and use in source and binary forms, with or without 10251881Speter * modification, are permitted provided that the following conditions 11251881Speter * are met: 12251881Speter * 1. Redistributions of source code must retain the above copyright 13251881Speter * notice, this list of conditions and the following disclaimer. 14251881Speter * 2. Redistributions in binary form must reproduce the above copyright 15251881Speter * notice, this list of conditions and the following disclaimer in the 16251881Speter * documentation and/or other materials provided with the distribution. 17251881Speter * 3. All advertising materials mentioning features or use of this software 18251881Speter * must display the following acknowledgement: 19251881Speter * This product includes software developed by the University of 20251881Speter * California, Berkeley and its contributors. 21251881Speter * 4. Neither the name of the University nor the names of its contributors 22251881Speter * may be used to endorse or promote products derived from this software 23251881Speter * without specific prior written permission. 24251881Speter * 25251881Speter * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26251881Speter * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27251881Speter * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28251881Speter * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29251881Speter * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30251881Speter * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31251881Speter * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32251881Speter * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33251881Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34251881Speter * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35251881Speter * SUCH DAMAGE. 36251881Speter * 37251881Speter * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95 38251881Speter * $Id: kern_lock.c,v 1.1 1997/08/04 17:46:51 smp Exp smp $ 39251881Speter */ 40251881Speter 41251881Speter#include <sys/param.h> 42251881Speter#include <sys/proc.h> 43251881Speter#include <sys/lock.h> 44251881Speter#include <sys/systm.h> 45251881Speter 46251881Speter/* 47251881Speter * Locking primitives implementation. 48251881Speter * Locks provide shared/exclusive sychronization. 49251881Speter */ 50251881Speter 51251881Speter#ifdef SIMPLELOCK_DEBUG 52251881Speter#define COUNT(p, x) if (p) (p)->p_locks += (x) 53251881Speter#else 54251881Speter#define COUNT(p, x) 55251881Speter#endif 56251881Speter 57251881Speter#if NCPUS > 1 58251881Speter 59251881Speter/* 60262253Speter * For multiprocessor system, try spin lock first. 61251881Speter * 62251881Speter * This should be inline expanded below, but we cannot have #if 63251881Speter * inside a multiline define. 
64251881Speter */ 65251881Speterint lock_wait_time = 100; 66251881Speter#define PAUSE(lkp, wanted) \ 67251881Speter if (lock_wait_time > 0) { \ 68251881Speter int i; \ 69251881Speter \ 70251881Speter simple_unlock(&lkp->lk_interlock); \ 71251881Speter for (i = lock_wait_time; i > 0; i--) \ 72251881Speter if (!(wanted)) \ 73251881Speter break; \ 74251881Speter simple_lock(&lkp->lk_interlock); \ 75251881Speter } \ 76251881Speter if (!(wanted)) \ 77251881Speter break; 78251881Speter 79251881Speter#else /* NCPUS == 1 */ 80251881Speter 81251881Speter/* 82251881Speter * It is an error to spin on a uniprocessor as nothing will ever cause 83251881Speter * the simple lock to clear while we are executing. 84251881Speter */ 85251881Speter#define PAUSE(lkp, wanted) 86251881Speter 87251881Speter#endif /* NCPUS == 1 */ 88251881Speter 89262253Speter/* 90251881Speter * Acquire a resource. 91251881Speter */ 92251881Speter#define ACQUIRE(lkp, error, extflags, wanted) \ 93251881Speter PAUSE(lkp, wanted); \ 94251881Speter for (error = 0; wanted; ) { \ 95251881Speter (lkp)->lk_waitcount++; \ 96251881Speter simple_unlock(&(lkp)->lk_interlock); \ 97251881Speter error = tsleep((void *)lkp, (lkp)->lk_prio, \ 98251881Speter (lkp)->lk_wmesg, (lkp)->lk_timo); \ 99251881Speter simple_lock(&(lkp)->lk_interlock); \ 100251881Speter (lkp)->lk_waitcount--; \ 101251881Speter if (error) \ 102251881Speter break; \ 103251881Speter if ((extflags) & LK_SLEEPFAIL) { \ 104251881Speter error = ENOLCK; \ 105251881Speter break; \ 106251881Speter } \ 107251881Speter } 108251881Speter 109251881Speter/* 110251881Speter * Initialize a lock; required before use. 
111251881Speter */ 112251881Spetervoid 113251881Speterlockinit(lkp, prio, wmesg, timo, flags) 114251881Speter struct lock *lkp; 115251881Speter int prio; 116251881Speter char *wmesg; 117251881Speter int timo; 118251881Speter int flags; 119251881Speter{ 120251881Speter 121251881Speter simple_lock_init(&lkp->lk_interlock); 122251881Speter lkp->lk_flags = flags & LK_EXTFLG_MASK; 123251881Speter lkp->lk_sharecount = 0; 124251881Speter lkp->lk_waitcount = 0; 125251881Speter lkp->lk_exclusivecount = 0; 126251881Speter lkp->lk_prio = prio; 127251881Speter lkp->lk_wmesg = wmesg; 128251881Speter lkp->lk_timo = timo; 129251881Speter lkp->lk_lockholder = LK_NOPROC; 130251881Speter} 131251881Speter 132251881Speter/* 133251881Speter * Determine the status of a lock. 134251881Speter */ 135251881Speterint 136251881Speterlockstatus(lkp) 137251881Speter struct lock *lkp; 138251881Speter{ 139251881Speter int lock_type = 0; 140251881Speter 141251881Speter simple_lock(&lkp->lk_interlock); 142251881Speter if (lkp->lk_exclusivecount != 0) 143251881Speter lock_type = LK_EXCLUSIVE; 144251881Speter else if (lkp->lk_sharecount != 0) 145251881Speter lock_type = LK_SHARED; 146251881Speter simple_unlock(&lkp->lk_interlock); 147251881Speter return (lock_type); 148251881Speter} 149251881Speter 150251881Speter/* 151251881Speter * Set, change, or release a lock. 152251881Speter * 153251881Speter * Shared requests increment the shared count. Exclusive requests set the 154251881Speter * LK_WANT_EXCL flag (preventing further shared locks), and wait for already 155251881Speter * accepted shared locks and shared-to-exclusive upgrades to go away. 
156251881Speter */ 157251881Speterint 158251881Speterlockmgr(lkp, flags, interlkp, p) 159251881Speter struct lock *lkp; 160251881Speter u_int flags; 161251881Speter struct simplelock *interlkp; 162251881Speter struct proc *p; 163251881Speter{ 164251881Speter int error; 165251881Speter pid_t pid; 166251881Speter int extflags; 167251881Speter 168251881Speter error = 0; 169251881Speter if (p) 170251881Speter pid = p->p_pid; 171251881Speter else 172251881Speter pid = LK_KERNPROC; 173251881Speter simple_lock(&lkp->lk_interlock); 174251881Speter if (flags & LK_INTERLOCK) 175251881Speter simple_unlock(interlkp); 176251881Speter extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK; 177251881Speter#ifdef DIAGNOSTIC 178251881Speter /* 179251881Speter * Once a lock has drained, the LK_DRAINING flag is set and an 180251881Speter * exclusive lock is returned. The only valid operation thereafter 181251881Speter * is a single release of that exclusive lock. This final release 182251881Speter * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any 183251881Speter * further requests of any sort will result in a panic. The bits 184251881Speter * selected for these two flags are chosen so that they will be set 185251881Speter * in memory that is freed (freed memory is filled with 0xdeadbeef). 186251881Speter * The final release is permitted to give a new lease on life to 187251881Speter * the lock by specifying LK_REENABLE. 
188251881Speter */ 189251881Speter if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) { 190251881Speter if (lkp->lk_flags & LK_DRAINED) 191251881Speter panic("lockmgr: using decommissioned lock"); 192251881Speter if ((flags & LK_TYPE_MASK) != LK_RELEASE || 193251881Speter lkp->lk_lockholder != pid) 194251881Speter panic("lockmgr: non-release on draining lock: %d\n", 195251881Speter flags & LK_TYPE_MASK); 196251881Speter lkp->lk_flags &= ~LK_DRAINING; 197251881Speter if ((flags & LK_REENABLE) == 0) 198251881Speter lkp->lk_flags |= LK_DRAINED; 199251881Speter } 200251881Speter#endif DIAGNOSTIC 201251881Speter 202251881Speter switch (flags & LK_TYPE_MASK) { 203251881Speter 204251881Speter case LK_SHARED: 205251881Speter if (lkp->lk_lockholder != pid) { 206251881Speter /* 207251881Speter * If just polling, check to see if we will block. 208251881Speter */ 209251881Speter if ((extflags & LK_NOWAIT) && (lkp->lk_flags & 210251881Speter (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) { 211251881Speter error = EBUSY; 212251881Speter break; 213251881Speter } 214251881Speter /* 215251881Speter * Wait for exclusive locks and upgrades to clear. 216251881Speter */ 217251881Speter ACQUIRE(lkp, error, extflags, lkp->lk_flags & 218251881Speter (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)); 219251881Speter if (error) 220251881Speter break; 221251881Speter lkp->lk_sharecount++; 222251881Speter COUNT(p, 1); 223251881Speter break; 224251881Speter } 225251881Speter /* 226251881Speter * We hold an exclusive lock, so downgrade it to shared. 227251881Speter * An alternative would be to fail with EDEADLK. 
228251881Speter */ 229251881Speter lkp->lk_sharecount++; 230251881Speter COUNT(p, 1); 231251881Speter /* fall into downgrade */ 232251881Speter 233251881Speter case LK_DOWNGRADE: 234251881Speter if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0) 235251881Speter panic("lockmgr: not holding exclusive lock"); 236251881Speter lkp->lk_sharecount += lkp->lk_exclusivecount; 237251881Speter lkp->lk_exclusivecount = 0; 238251881Speter lkp->lk_flags &= ~LK_HAVE_EXCL; 239251881Speter lkp->lk_lockholder = LK_NOPROC; 240251881Speter if (lkp->lk_waitcount) 241251881Speter wakeup((void *)lkp); 242251881Speter break; 243251881Speter 244251881Speter case LK_EXCLUPGRADE: 245251881Speter /* 246251881Speter * If another process is ahead of us to get an upgrade, 247251881Speter * then we want to fail rather than have an intervening 248251881Speter * exclusive access. 249251881Speter */ 250251881Speter if (lkp->lk_flags & LK_WANT_UPGRADE) { 251251881Speter lkp->lk_sharecount--; 252251881Speter COUNT(p, -1); 253251881Speter error = EBUSY; 254251881Speter break; 255251881Speter } 256251881Speter /* fall into normal upgrade */ 257251881Speter 258251881Speter case LK_UPGRADE: 259251881Speter /* 260251881Speter * Upgrade a shared lock to an exclusive one. If another 261251881Speter * shared lock has already requested an upgrade to an 262251881Speter * exclusive lock, our shared lock is released and an 263251881Speter * exclusive lock is requested (which will be granted 264251881Speter * after the upgrade). If we return an error, the file 265251881Speter * will always be unlocked. 266251881Speter */ 267251881Speter if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0) 268251881Speter panic("lockmgr: upgrade exclusive lock"); 269251881Speter lkp->lk_sharecount--; 270251881Speter COUNT(p, -1); 271251881Speter /* 272251881Speter * If we are just polling, check to see if we will block. 
273251881Speter */ 274251881Speter if ((extflags & LK_NOWAIT) && 275251881Speter ((lkp->lk_flags & LK_WANT_UPGRADE) || 276251881Speter lkp->lk_sharecount > 1)) { 277251881Speter error = EBUSY; 278251881Speter break; 279251881Speter } 280251881Speter if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) { 281251881Speter /* 282251881Speter * We are first shared lock to request an upgrade, so 283251881Speter * request upgrade and wait for the shared count to 284251881Speter * drop to zero, then take exclusive lock. 285251881Speter */ 286251881Speter lkp->lk_flags |= LK_WANT_UPGRADE; 287251881Speter ACQUIRE(lkp, error, extflags, lkp->lk_sharecount); 288251881Speter lkp->lk_flags &= ~LK_WANT_UPGRADE; 289251881Speter if (error) 290251881Speter break; 291251881Speter lkp->lk_flags |= LK_HAVE_EXCL; 292251881Speter lkp->lk_lockholder = pid; 293251881Speter if (lkp->lk_exclusivecount != 0) 294251881Speter panic("lockmgr: non-zero exclusive count"); 295251881Speter lkp->lk_exclusivecount = 1; 296251881Speter COUNT(p, 1); 297251881Speter break; 298251881Speter } 299251881Speter /* 300251881Speter * Someone else has requested upgrade. Release our shared 301251881Speter * lock, awaken upgrade requestor if we are the last shared 302251881Speter * lock, then request an exclusive lock. 303251881Speter */ 304251881Speter if (lkp->lk_sharecount == 0 && lkp->lk_waitcount) 305251881Speter wakeup((void *)lkp); 306251881Speter /* fall into exclusive request */ 307251881Speter 308251881Speter case LK_EXCLUSIVE: 309251881Speter if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) { 310251881Speter /* 311251881Speter * Recursive lock. 312251881Speter */ 313251881Speter if ((extflags & LK_CANRECURSE) == 0) 314251881Speter panic("lockmgr: locking against myself"); 315251881Speter lkp->lk_exclusivecount++; 316251881Speter COUNT(p, 1); 317251881Speter break; 318251881Speter } 319251881Speter /* 320251881Speter * If we are just polling, check to see if we will sleep. 
321251881Speter */ 322251881Speter if ((extflags & LK_NOWAIT) && ((lkp->lk_flags & 323251881Speter (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 324251881Speter lkp->lk_sharecount != 0)) { 325251881Speter error = EBUSY; 326251881Speter break; 327251881Speter } 328251881Speter /* 329251881Speter * Try to acquire the want_exclusive flag. 330251881Speter */ 331251881Speter ACQUIRE(lkp, error, extflags, lkp->lk_flags & 332251881Speter (LK_HAVE_EXCL | LK_WANT_EXCL)); 333251881Speter if (error) 334251881Speter break; 335251881Speter lkp->lk_flags |= LK_WANT_EXCL; 336251881Speter /* 337251881Speter * Wait for shared locks and upgrades to finish. 338251881Speter */ 339251881Speter ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 || 340251881Speter (lkp->lk_flags & LK_WANT_UPGRADE)); 341251881Speter lkp->lk_flags &= ~LK_WANT_EXCL; 342251881Speter if (error) 343251881Speter break; 344251881Speter lkp->lk_flags |= LK_HAVE_EXCL; 345251881Speter lkp->lk_lockholder = pid; 346251881Speter if (lkp->lk_exclusivecount != 0) 347251881Speter panic("lockmgr: non-zero exclusive count"); 348251881Speter lkp->lk_exclusivecount = 1; 349251881Speter COUNT(p, 1); 350251881Speter break; 351251881Speter 352251881Speter case LK_RELEASE: 353251881Speter if (lkp->lk_exclusivecount != 0) { 354251881Speter if (pid != lkp->lk_lockholder) 355251881Speter panic("lockmgr: pid %d, not %s %d unlocking", 356251881Speter pid, "exclusive lock holder", 357251881Speter lkp->lk_lockholder); 358251881Speter lkp->lk_exclusivecount--; 359251881Speter COUNT(p, -1); 360251881Speter if (lkp->lk_exclusivecount == 0) { 361251881Speter lkp->lk_flags &= ~LK_HAVE_EXCL; 362251881Speter lkp->lk_lockholder = LK_NOPROC; 363251881Speter } 364251881Speter } else if (lkp->lk_sharecount != 0) { 365251881Speter lkp->lk_sharecount--; 366251881Speter COUNT(p, -1); 367251881Speter } 368251881Speter if (lkp->lk_waitcount) 369251881Speter wakeup((void *)lkp); 370251881Speter break; 371251881Speter 372251881Speter case 
LK_DRAIN: 373251881Speter /* 374251881Speter * Check that we do not already hold the lock, as it can 375251881Speter * never drain if we do. Unfortunately, we have no way to 376251881Speter * check for holding a shared lock, but at least we can 377251881Speter * check for an exclusive one. 378251881Speter */ 379251881Speter if (lkp->lk_lockholder == pid) 380251881Speter panic("lockmgr: draining against myself"); 381251881Speter /* 382251881Speter * If we are just polling, check to see if we will sleep. 383251881Speter */ 384251881Speter if ((extflags & LK_NOWAIT) && ((lkp->lk_flags & 385251881Speter (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 386251881Speter lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) { 387251881Speter error = EBUSY; 388251881Speter break; 389251881Speter } 390251881Speter PAUSE(lkp, ((lkp->lk_flags & 391251881Speter (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 392251881Speter lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)); 393251881Speter for (error = 0; ((lkp->lk_flags & 394251881Speter (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || 395251881Speter lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) { 396251881Speter lkp->lk_flags |= LK_WAITDRAIN; 397251881Speter simple_unlock(&lkp->lk_interlock); 398251881Speter if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio, 399251881Speter lkp->lk_wmesg, lkp->lk_timo)) 400251881Speter return (error); 401251881Speter if ((extflags) & LK_SLEEPFAIL) 402251881Speter return (ENOLCK); 403251881Speter simple_lock(&lkp->lk_interlock); 404251881Speter } 405251881Speter lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL; 406251881Speter lkp->lk_lockholder = pid; 407251881Speter lkp->lk_exclusivecount = 1; 408251881Speter COUNT(p, 1); 409251881Speter break; 410251881Speter 411251881Speter default: 412251881Speter simple_unlock(&lkp->lk_interlock); 413251881Speter panic("lockmgr: unknown locktype request %d", 414251881Speter flags & LK_TYPE_MASK); 415251881Speter /* NOTREACHED */ 
416251881Speter } 417251881Speter if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags & 418251881Speter (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 && 419251881Speter lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) { 420251881Speter lkp->lk_flags &= ~LK_WAITDRAIN; 421251881Speter wakeup((void *)&lkp->lk_flags); 422251881Speter } 423251881Speter simple_unlock(&lkp->lk_interlock); 424251881Speter return (error); 425251881Speter} 426251881Speter 427251881Speter/* 428251881Speter * Print out information about state of a lock. Used by VOP_PRINT 429251881Speter * routines to display ststus about contained locks. 430251881Speter */ 431251881Spetervoid 432251881Speterlockmgr_printinfo(lkp) 433251881Speter struct lock *lkp; 434251881Speter{ 435251881Speter 436251881Speter if (lkp->lk_sharecount) 437251881Speter printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg, 438251881Speter lkp->lk_sharecount); 439251881Speter else if (lkp->lk_flags & LK_HAVE_EXCL) 440251881Speter printf(" lock type %s: EXCL (count %d) by pid %d", 441251881Speter lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder); 442251881Speter if (lkp->lk_waitcount > 0) 443251881Speter printf(" with %d pending", lkp->lk_waitcount); 444251881Speter} 445251881Speter 446251881Speter#if defined(SIMPLELOCK_DEBUG) && NCPUS == 1 447251881Speter#include <sys/kernel.h> 448251881Speter#include <sys/sysctl.h> 449251881Speter 450251881Speterstatic int lockpausetime = 0; 451251881SpeterSYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, ""); 452251881Speter 453251881Speterint simplelockrecurse; 454251881Speter 455251881Speter/* 456251881Speter * Simple lock functions so that the debugger can see from whence 457251881Speter * they are being called. 
458251881Speter */ 459251881Spetervoid 460251881Spetersimple_lock_init(alp) 461251881Speter struct simplelock *alp; 462251881Speter{ 463251881Speter 464251881Speter alp->lock_data = 0; 465251881Speter} 466251881Speter 467251881Spetervoid 468251881Speter_simple_lock(alp, id, l) 469251881Speter struct simplelock *alp; 470251881Speter const char *id; 471251881Speter int l; 472251881Speter{ 473251881Speter 474251881Speter if (simplelockrecurse) 475251881Speter return; 476251881Speter if (alp->lock_data == 1) { 477251881Speter if (lockpausetime == -1) 478251881Speter panic("%s:%d: simple_lock: lock held", id, l); 479251881Speter printf("%s:%d: simple_lock: lock held\n", id, l); 480251881Speter if (lockpausetime == 1) { 481251881Speter Debugger("simple_lock"); 482251881Speter /*BACKTRACE(curproc); */ 483251881Speter } else if (lockpausetime > 1) { 484251881Speter printf("%s:%d: simple_lock: lock held...", id, l); 485251881Speter tsleep(&lockpausetime, PCATCH | PPAUSE, "slock", 486251881Speter lockpausetime * hz); 487251881Speter printf(" continuing\n"); 488251881Speter } 489251881Speter } 490251881Speter alp->lock_data = 1; 491251881Speter if (curproc) 492251881Speter curproc->p_simple_locks++; 493251881Speter} 494251881Speter 495251881Speterint 496251881Speter_simple_lock_try(alp, id, l) 497251881Speter struct simplelock *alp; 498251881Speter const char *id; 499251881Speter int l; 500251881Speter{ 501251881Speter 502251881Speter if (alp->lock_data) 503251881Speter return (0); 504251881Speter if (simplelockrecurse) 505251881Speter return (1); 506251881Speter alp->lock_data = 1; 507251881Speter if (curproc) 508251881Speter curproc->p_simple_locks++; 509251881Speter return (1); 510251881Speter} 511251881Speter 512251881Spetervoid 513251881Speter_simple_unlock(alp, id, l) 514251881Speter struct simplelock *alp; 515251881Speter const char *id; 516251881Speter int l; 517251881Speter{ 518251881Speter 519251881Speter if (simplelockrecurse) 520251881Speter return; 
521251881Speter if (alp->lock_data == 0) { 522251881Speter if (lockpausetime == -1) 523251881Speter panic("%s:%d: simple_unlock: lock not held", id, l); 524251881Speter printf("%s:%d: simple_unlock: lock not held\n", id, l); 525251881Speter if (lockpausetime == 1) { 526251881Speter Debugger("simple_unlock"); 527251881Speter /* BACKTRACE(curproc); */ 528251881Speter } else if (lockpausetime > 1) { 529251881Speter printf("%s:%d: simple_unlock: lock not held...", id, l); 530251881Speter tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock", 531251881Speter lockpausetime * hz); 532251881Speter printf(" continuing\n"); 533251881Speter } 534251881Speter } 535251881Speter alp->lock_data = 0; 536251881Speter if (curproc) 537251881Speter curproc->p_simple_locks--; 538251881Speter} 539251881Speter#endif /* SIMPLELOCK_DEBUG && NCPUS == 1 */ 540251881Speter