/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: head/sys/kern/kern_lockf.c 82189 2001-08-23 13:21:17Z ache $
 */

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

#include <machine/limits.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>


static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	if (fl->l_len < 0)
		return (EINVAL);
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		/* size always >= 0 */
		if (fl->l_start > 0 && size > OFF_MAX - fl->l_start)
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		off_t oadd = fl->l_len - 1;

		/* fl->l_len & start are non-negative */
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
		if (end < start)
			return (EINVAL);
	}
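	/*
	 * Worked example of the conversion above (illustrative values):
	 * l_start = 100 with l_len = 10 yields the byte range
	 * [100, 109]; l_len = 0 means "through end of file" and is
	 * represented as end = -1 throughout this file.
	 */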
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
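	/*
	 * Note: PCATCH makes the tsleep() below interruptible by
	 * signals.  Under the historical BSD convention that larger
	 * priority values are less favorable, the +4 gives processes
	 * waiting for write locks a slightly lower wakeup priority
	 * than those waiting for read locks.
	 */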
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
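		/*
		 * Hypothetical illustration of the cycle the walk below
		 * detects: process A holds [0, 9] and wants [10, 19];
		 * process B holds [10, 19] and is already asleep waiting
		 * for [0, 9].  Starting from the lock that blocks us and
		 * repeatedly following each sleeping owner's wait channel
		 * to the lock it waits on, we eventually arrive back at
		 * ourselves, so we return EDEADLK instead of sleeping
		 * forever.
		 */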
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			mtx_lock_spin(&sched_lock);
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					mtx_unlock_spin(&sched_lock);
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
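		/*
		 * Sketch of the six cases on a common byte axis (an
		 * illustration, where "====" is the requested lock and
		 * "----" is the existing lock lf):
		 *
		 *	0:	====
		 *			     ----	(disjoint)
		 *	1:	====
		 *		----		(identical range)
		 *	2:	 ==
		 *		----		(lf contains lock)
		 *	3:	====
		 *		 --		(lock contains lf)
		 *	4:	  ====
		 *		----		(lf starts before lock)
		 *	5:	====
		 *		  ----		(lf ends after lock)
		 */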
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
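	/*
	 * Three-piece example (hypothetical ranges): if lock1 covers
	 * [0, 99] and lock2 covers [40, 49], the code below trims
	 * lock1 to [0, 39], keeps lock2 at [40, 49], and creates
	 * splitlock for [50, 99].
	 */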
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	/* XXX no %qd in kernel.  Truncate. */
	printf(" in ino %lu on dev <%d, %d>, %s, start %ld, end %ld",
	    (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev),
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", (long)lock->lf_start, (long)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for ino %lu on dev <%d, %d>:\n",
	    tag, (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ",(void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		/* XXX no %qd in kernel.  Truncate. */
		printf(", %s, start %ld, end %ld",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (long)lf->lf_start, (long)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			/* XXX no %qd in kernel.  Truncate. */
			printf(", %s, start %ld, end %ld",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (long)blk->lf_start,
			    (long)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */