kern_lockf.c revision 22521
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: head/sys/kern/kern_lockf.c 22521 1997-02-10 02:22:35Z dyson $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
int	lockf_debug = 0;
SYSCTL_INT(_debug, 4, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
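/*
 * A rough usage sketch (illustrative, not part of this file): advisory
 * locks normally reach this code via fcntl(2) or flock(2).  A userland
 * request such as
 *
 *	struct flock fl;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 100;
 *	fl.l_len = 50;
 *	fl.l_type = F_WRLCK;
 *	error = fcntl(fd, F_SETLKW, &fl);
 *
 * becomes a VOP_ADVLOCK() call on the open vnode, and the per-filesystem
 * advlock routine is expected to forward that call to lf_advlock() below.
 */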
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
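	/*
	 * A worked example of the conversion above (illustrative values
	 * only): with l_whence == SEEK_SET, l_start == 100 and l_len == 50,
	 * the request covers start == 100, end == 149.  With l_len == 0 the
	 * lock runs to the end of the file and end is left at -1, which the
	 * overlap checks below treat as "no upper bound".  SEEK_END requests
	 * are first biased by the current file size passed in as "size".
	 */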
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
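
/*
 * A minimal sketch of how a filesystem is expected to hook into this code
 * (assumed caller, not part of this file): the per-fs advlock vnode
 * operation simply passes its argument block, the head of the in-core lock
 * list and the current file size, roughly as ufs does:
 *
 *	int
 *	ufs_advlock(ap)
 *		struct vop_advlock_args *ap;
 *	{
 *		register struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 */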

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
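		/*
		 * Concretely (an illustrative scenario): if process A holds
		 * a POSIX lock on bytes 0..9 and sleeps waiting for B's lock
		 * on 10..19, while B in turn sleeps waiting for A's lock on
		 * 0..9, the walk below starting from the owner of "block"
		 * arrives back at the requesting process and the request
		 * fails with EDEADLK.
		 */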
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
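		/*
		 * Note that lf_next does double duty: while this request
		 * sits on a blocked list it points at the lock that blocks
		 * us (which is what the deadlock walk above follows), and it
		 * is reset to NOLOCKF by lf_wakelock() when the blocker goes
		 * away.
		 */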
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) {
			/*
			 * We may have been awakened by a signal (in
			 * which case we must remove ourselves from the
			 * blocked list) and/or by another process
			 * releasing a lock (in which case we have already
			 * been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next)
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock,
					lf_block);
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (ltmp = overlap->lf_blkhd.tqh_first) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
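/*
 * Beyond servicing F_UNLCK requests from lf_advlock(), this is also called
 * from lf_setlock() to drop an flock-style shared lock before sleeping for
 * an upgrade to exclusive.
 */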
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
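/*
 * A rough usage note (illustrative): this backs fcntl(fd, F_GETLK, &fl).
 * On return, fl describes the first lock that would block the probe, or
 * has l_type set to F_UNLCK when the probe would succeed; l_pid is the
 * owning process for POSIX locks and -1 for flock-style locks.
 */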
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
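		/*
		 * An illustrative picture of the six cases checked below,
		 * where "lock" is the request and "lf" the existing entry
		 * (an end of -1 means "to EOF"):
		 *
		 *	0)  lock: ......[===]..   lf: [===]........  (disjoint)
		 *	1)  lock: ..[=====]....    lf: ..[=====]....
		 *	2)  lock: ...[===].....    lf: ..[=====]....
		 *	3)  lock: ..[=====]....    lf: ...[===].....
		 *	4)  lock: ...[====]....    lf: .[====]......
		 *	5)  lock: .[====]......    lf: ...[====]....
		 */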
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
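/*
 * A worked example (illustrative values): if lock1 covers bytes 0..99 and
 * lock2 covers 40..59, the code below trims lock1 to 0..39, links lock2 in
 * after it, and allocates "splitlock" for the remaining 60..99 range,
 * leaving three adjacent entries on the list.
 */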
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
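/*
 * Each lock woken here was put to sleep in lf_setlock(); clearing lf_next
 * tells that sleeper it has already been removed from the blocked list,
 * and on wakeup it loops back to lf_getblock() to retry the request.
 */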
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (wakelock = listhead->lf_blkhd.tqh_first) {
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_blkhd.tqh_first)
		printf(" block 0x%x\n", lock->lf_blkhd.tqh_first);
	else
		printf("\n");
}

lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		for (blk = lf->lf_blkhd.tqh_first; blk;
		     blk = blk->lf_block.tqe_next) {
			printf("\n\t\tlock request 0x%lx for ", blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%x", blk->lf_id);
			printf(", %s, start %d, end %d",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (blk->lf_blkhd.tqh_first)
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */