/*	$NetBSD: vfs_lockf.c,v 1.16 2000/06/12 14:33:06 sommerfeld Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * XXXSMP TODO: Using either (a) a global lock, or (b) the vnode's
 * interlock should be sufficient; (b) requires a change to the API
 * because the vnode isn't visible here.
 *
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	struct flock *fl;
	int flags;
{
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
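	/*
	 * For example (hypothetical values): a request with
	 * l_whence = SEEK_SET, l_start = 100 and l_len = 50 covers the
	 * byte range [100, 149], while l_len = 0 yields end = -1,
	 * i.e. the lock extends to the end of the file.
	 */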
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
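
/*
 * Illustration only (not part of this file): a file system's VOP_ADVLOCK
 * entry typically just forwards to lf_advlock() with its per-inode lock
 * list head and the current file size.  The sketch below is a hedged
 * example; the vop_advlock_args layout and the field names i_lockf and
 * i_size are assumptions borrowed from ufs and may differ elsewhere.
 */
#if 0	/* example sketch, not compiled */
int
example_advlock(v)
	void *v;
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct inode *ip = VTOI(ap->a_vp);

	/* Forward the request to the common byte-range lock code. */
	return (lf_advlock(&ip->i_lockf, ip->i_size, ap->a_id, ap->a_op,
	    ap->a_fl, ap->a_flags));
}
#endif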

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	struct lockf *lock;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
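		/*
		 * Illustration (hypothetical processes and ranges): if
		 * process A holds [0,9] and is asleep here waiting for
		 * B's lock on [10,19], then when B requests [0,9] the
		 * walk below follows A's wait channel to the lock A is
		 * blocked on, finds it is owned by B itself, and
		 * returns EDEADLK.
		 */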
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return (EDEADLK);
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
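		/*
		 * Example (hypothetical ranges): if this process already
		 * holds a write lock on [0,99] and now requests a read
		 * lock on [0,49], lf_findoverlap() reports case 2 with a
		 * common starting point; the code below links the new
		 * lock in front and shrinks the old one to [50,99].
		 */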
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = overlap->lf_blkhd.tqh_first)) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(unlock)
	struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
					&prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
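
/*
 * For instance (hypothetical ranges): unlocking [40,59] out of a held
 * lock on [0,99] is case 2 above; lf_split() leaves the original lock
 * as [0,39] and creates a new lock for [60,99], so the byte range in
 * the middle is released and any waiters on it are woken to retry.
 */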

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	struct lockf *lock;
	struct flock *fl;
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS,
					&prev, &overlap)) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
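		/*
		 * Concretely (hypothetical ranges), with lock = [10,20]:
		 * lf = [30,40] is case 0, [10,20] case 1, [5,25] case 2,
		 * [12,15] case 3, [5,15] case 4 and [15,25] case 5.
		 */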
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
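/*
 * For example (hypothetical ranges): splitting lock1 = [0,99] around a
 * contained region lock2 = [40,59] shrinks lock1 to [0,39], allocates a
 * third lock for [60,99] and chains them lock1 -> lock2 -> new piece.
 */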
void
lf_split(lock1, lock2)
	struct lockf *lock1;
	struct lockf *lock2;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	struct lockf *wakelock;

	while ((wakelock = listhead->lf_blkhd.tqh_first)) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	struct lockf *lock;
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%p", lock->lf_id);
	printf(" %s, start %qx, end %qx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_blkhd.tqh_first)
		printf(" block %p\n", lock->lf_blkhd.tqh_first);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%p", lf->lf_id);
		printf(", %s, start %qx, end %qx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		for (blk = lf->lf_blkhd.tqh_first; blk;
		     blk = blk->lf_block.tqe_next) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%p", blk->lf_id);
			printf(", %s, start %qx, end %qx",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (blk->lf_blkhd.tqh_first)
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */