/*	$NetBSD: vfs_lockf.c,v 1.5 1994/06/29 06:33:55 cgd Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	register struct flock *fl;
	int flags;
{
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	lock->lf_block = (struct lockf *)0;
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
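
/*
 * Example (illustrative only; "ip" and "p" are assumed names for the
 * inode and the requesting process): how a file system might set a
 * POSIX write lock through lf_advlock(), and how the flock range maps
 * to [lf_start, lf_end] above.
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 100;
 *	fl.l_len = 50;		(start = 100, end = 149)
 *	error = lf_advlock(&ip->i_lockf, ip->i_size, (caddr_t)p,
 *	    F_SETLK, &fl, F_POSIX);
 *
 * With l_len == 0 the lock runs to the end of the file (end = -1).
 */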

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority.
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan the lock list for this file looking for locks that would
	 * block us.
	 */
	while (block = lf_getblock(lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) {
			/*
			 * Delete ourselves from the waiting-to-lock list.
			 */
			for (block = lock->lf_next;
			     block != NOLOCKF;
			     block = block->lf_block) {
				if (block->lf_block != lock)
					continue;
				block->lf_block = block->lf_block->lf_block;
				break;
			}
			/*
			 * If we did not find ourselves on the list, but
			 * are still linked onto a lock list, then something
			 * is very wrong.
			 */
			if (block == NOLOCKF && lock->lf_next != NOLOCKF)
				panic("lf_setlock: lost lock");
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		if (ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap))
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
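
/*
 * A worked illustration of the overlap handling above (offsets are
 * hypothetical).  A process holding an exclusive lock on bytes
 * [0, 99] that sets a shared lock on the same range hits case 1:
 * the existing entry is retyped to F_RDLCK in place and lf_wakelock()
 * lets readers blocked on the old exclusive lock retry.  Setting the
 * shared lock on [40, 59] instead hits case 2: lf_split() carves
 * [0, 99] into [0, 39] and [60, 99], with the new F_RDLCK entry
 * covering [40, 59] between them.
 */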

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while (ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) {
		/*
		 * Wake up the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
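
/*
 * Sketch of case 2 above (hypothetical offsets): unlocking [40, 59]
 * out of a held lock on [0, 99] leaves [0, 39] and [60, 99] on the
 * list via lf_split(); the F_UNLCK structure itself is unlinked here
 * and freed by the caller, lf_advlock().
 */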

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if (block = lf_getblock(lock)) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
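
/*
 * Example (illustrative): if another process holds F_WRLCK on the
 * whole file ([0, -1]), an F_GETLK query for any overlapping range
 * comes back with l_type = F_WRLCK, l_whence = SEEK_SET, l_start = 0,
 * l_len = 0 (to EOF) and the holder's pid; with no blocking lock only
 * l_type is touched, and it is set to F_UNLCK.
 */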

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while (ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope; point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}
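
/*
 * The blocking test above is the usual shared/exclusive matrix: two
 * F_RDLCK locks never conflict, so an overlap blocks only when at
 * least one side is F_WRLCK.  E.g. (hypothetical ranges) a shared
 * lock on [0, 99] does not block another process's shared lock on
 * [50, 149], but does block its exclusive request on any overlapping
 * byte.
 */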

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap.
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if ((start <= lf->lf_start) &&
		    ((end == -1) ||
		    ((lf->lf_end != -1) && (end >= lf->lf_end)))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}
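
/*
 * The six cases above, pictured (illustrative; "lock" is the request,
 * "lf" an entry on the list, offsets grow to the right):
 *
 *	case 0:  lf:   -----			(no overlap)
 *	         lock:         -----
 *	case 1:  lf:     --------		(identical ranges)
 *	         lock:   --------
 *	case 2:  lf:     ------------		(lf contains lock)
 *	         lock:      ------
 *	case 3:  lf:        ------		(lock contains lf)
 *	         lock:   ------------
 *	case 4:  lf:     --------		(lf starts before lock)
 *	         lock:       --------
 *	case 5:  lf:         --------		(lf ends after lock)
 *	         lock:   --------
 */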

/*
 * Add a lock to the end of the blocked list.
 */
void
lf_addblock(lock, blocked)
	struct lockf *lock;
	struct lockf *blocked;
{
	register struct lockf *lf;

	if (blocked == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", blocked);
		lf_print("to blocked list of", lock);
	}
#endif /* LOCKF_DEBUG */
	if ((lf = lock->lf_block) == NOLOCKF) {
		lock->lf_block = blocked;
		return;
	}
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = blocked;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in.
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
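
/*
 * Worked example (hypothetical offsets): splitting a held lock on
 * [0, 99] around the contained region [40, 59] shrinks lock1 to
 * [0, 39], allocates splitlock for [60, 99], and links the list as
 * lock1 -> lock2 -> splitlock, i.e. [0, 39] -> [40, 59] -> [60, 99].
 */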

/*
 * Wake up a blocked list.
 */
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block 0x%x\n", lock->lf_block);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block 0x%x\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */