kern_lock.c revision 76100
/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: head/sys/kern/kern_lock.c 76100 2001-04-28 12:11:01Z alfred $
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
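
/*
 * Example usage (an illustrative sketch, not part of this file: "map_lock"
 * and the priority/flag choices are hypothetical, but the functions and
 * flags are the ones implemented below and defined in <sys/lock.h>):
 *
 *	struct lock map_lock;
 *
 *	lockinit(&map_lock, PVM, "maplk", 0, 0);
 *	...
 *	if (lockmgr(&map_lock, LK_SHARED, NULL, curproc) == 0) {
 *		... read-mostly access under the shared lock ...
 *		lockmgr(&map_lock, LK_RELEASE, NULL, curproc);
 *	}
 *	lockdestroy(&map_lock);
 */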

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
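/*
 * lockinit() hands these interlocks out round-robin, using
 * lock_mtx_selector as the rotating index into lock_mtx_array.
 */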
extern int lock_nmtx;
int lock_mtx_selector;
struct mtx *lock_mtx_array;
static struct mtx lock_mtx;

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
	int	i;

	/*
	 * Initialize the lockmgr protection mutex if it hasn't already been
	 * done.  Unless something changes about kernel startup order, VM
	 * initialization will always cause this mutex to already be
	 * initialized in a call to lockinit().
	 */
	if (lock_mtx_selector == 0)
		mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
	else {
		/*
		 * This is necessary if (lock_nmtx == 1) and doesn't hurt
		 * otherwise.
		 */
		lock_mtx_selector = 0;
	}

	lock_mtx_array = (struct mtx *)malloc(sizeof(struct mtx) * lock_nmtx,
	    M_CACHE, M_WAITOK);
	for (i = 0; i < lock_nmtx; i++)
		mtx_init(&lock_mtx_array[i], "lockmgr interlock", MTX_DEF);
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * This is the waitloop optimization: on SMP, briefly poll the lock (with
 * the interlock dropped) in the hope that it is released before we commit
 * to a full sleep in acquire().
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		mtx_unlock(lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		mtx_lock(lkp->lk_interlock);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

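/*
 * Wait until none of the flag bits in "wanted" remain set on lkp, sleeping
 * on the lock via msleep() with the interlock held.  Returns 0 on success,
 * EBUSY for a failed LK_NOWAIT poll, ENOLCK for LK_SLEEPFAIL after a
 * sleep, or the msleep() error.
 */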
158
159static int
160acquire(struct lock *lkp, int extflags, int wanted) {
161	int s, error;
162
163	CTR3(KTR_LOCKMGR,
164	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
165	    lkp, extflags, wanted);
166
167	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
168		return EBUSY;
169	}
170
171	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
172		error = apause(lkp, wanted);
173		if (error == 0)
174			return 0;
175	}
176
177	s = splhigh();
178	while ((lkp->lk_flags & wanted) != 0) {
179		lkp->lk_flags |= LK_WAIT_NONZERO;
180		lkp->lk_waitcount++;
181		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
182		    lkp->lk_wmesg, lkp->lk_timo);
183		if (lkp->lk_waitcount == 1) {
184			lkp->lk_flags &= ~LK_WAIT_NONZERO;
185			lkp->lk_waitcount = 0;
186		} else {
187			lkp->lk_waitcount--;
188		}
189		if (error) {
190			splx(s);
191			return error;
192		}
193		if (extflags & LK_SLEEPFAIL) {
194			splx(s);
195			return ENOLCK;
196		}
197	}
198	splx(s);
199	return 0;
200}
201
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
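/*
 * A sketch of the LK_INTERLOCK hand-off convention (the "obj" structure
 * and its mutex are hypothetical; the flag semantics follow the code
 * below): a caller that already holds a mutex covering the lock's
 * container may pass it in, and lockmgr() drops it once the lock's own
 * interlock is held, so neither mutex is held across a sleep:
 *
 *	mtx_lock(&obj_mtx);
 *	... find obj, decide to lock it ...
 *	error = lockmgr(&obj->o_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &obj_mtx, curproc);
 *	... obj_mtx has been released on return ...
 */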
int
#ifndef	DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, p)
#else
debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct proc *p;
#ifdef	DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	pid_t pid;
	int extflags, lockflags;

	CTR5(KTR_LOCKMGR,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "interlkp == %p, p == %p", lkp, lkp->lk_wmesg, flags, interlkp, p);

	error = 0;
	if (p == NULL)
		pid = LK_KERNPROC;
	else
		pid = p->p_pid;

	mtx_lock(lkp->lk_interlock);
	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if P_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != pid) {
			lockflags = LK_HAVE_EXCL;
			if (p) {
				PROC_LOCK(p);
				if (!(p->p_flag & P_DEADLKTREAT)) {
					lockflags |= LK_WANT_EXCL |
					    LK_WANT_UPGRADE;
				}
				PROC_UNLOCK(p);
			}
			error = acquire(lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(lkp, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == pid && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner pid (%d) != pid (%d), exclcnt (%d) != 0)",
			lkp->lk_lockholder, pid, lkp->lk_exclusivecount));
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

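	/*
	 * LK_EXCLUPGRADE differs from LK_UPGRADE only in that it fails
	 * with EBUSY when another upgrade is already pending, instead of
	 * falling back to a plain exclusive-lock request.
	 */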
	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			break;
		}
		/*
		 * Someone else has requested an upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != pid &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
			lkp->lk_wmesg, lkp->lk_timo);
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	if (lock_mtx_array != NULL) {
		mtx_lock(&lock_mtx);
		lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
		lock_mtx_selector++;
		if (lock_mtx_selector == lock_nmtx)
			lock_mtx_selector = 0;
		mtx_unlock(&lock_mtx);
	} else {
		/*
		 * Giving lockmgr locks that are initialized during boot a
		 * pointer to the internal lockmgr mutex is safe, since the
		 * lockmgr code itself doesn't call lockinit() (which could
		 * cause mutex recursion).
		 */
		if (lock_mtx_selector == 0) {
			/*
			 * This case only happens during kernel bootstrapping,
			 * so there's no reason to protect modification of
			 * lock_mtx_selector or lock_mtx.
			 */
			mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
			lock_mtx_selector = 1;
		}
		lkp->lk_interlock = &lock_mtx;
	}
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
}
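
/*
 * Example lockinit() usage (an illustrative sketch; the lock name and the
 * parameter values are hypothetical): "prio" is the priority handed to
 * msleep() while waiting, "wmesg" the wait message shown by ps(1), "timo"
 * a sleep timeout in ticks (0 for none), and "flags" any of the external
 * LK_EXTFLG_MASK flags, e.g.:
 *
 *	struct lock buf_lock;
 *
 *	lockinit(&buf_lock, PRIBIO + 4, "buflk", 0, LK_NOPAUSE);
 */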

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCKMGR, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, p)
	struct lock *lkp;
	struct proc *p;
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (p == NULL || lkp->lk_lockholder == p->p_pid)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
609