/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: head/sys/kern/kern_lockf.c 98998 2002-06-29 00:29:12Z alfred $
 */

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

#include <machine/limits.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock(struct lockf *);
static int	 lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *
	 lf_getblock(struct lockf *);
static int	 lf_getlock(struct lockf *, struct flock *);
static int	 lf_setlock(struct lockf *);
static void	 lf_split(struct lockf *, struct lockf *);
static void	 lf_wakelock(struct lockf *);

/*
 * Advisory record locking support
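 *
 * These routines back VOP_ADVLOCK() and thus both fcntl(2)-style POSIX
 * byte-range locks (F_POSIX) and flock(2)-style whole-file locks
 * (F_FLOCK).  A minimal userland sketch (illustrative only, not part
 * of this file):
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0,
 *	    .l_len = 0, .l_type = F_WRLCK };
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		err(1, "fcntl");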
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end, oadd;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0)
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
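	/*
	 * For example (illustrative only): l_start == 100 with
	 * l_len == 50 covers the range [100, 149]; l_len == 0 yields
	 * end == -1, meaning "through end of file"; and a negative
	 * length such as l_start == 100, l_len == -50 covers [50, 99].
	 */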
	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	/*
	 * XXX The problem is that VTOI is ufs specific, so it will
	 * break LOCKF_DEBUG for all filesystems other than UFS because
	 * it casts the vnode->data ptr to struct inode *.
	 */
/*	lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
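		/*
		 * Illustrative scenario: process A holds [0, 9] and is
		 * asleep waiting for [10, 19], which we hold.  If we now
		 * request [0, 9], walking the chain of waiting owners
		 * from our blocker (A, which waits on us) leads back to
		 * ourselves, so we return EDEADLK rather than sleep
		 * forever.
		 */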
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			struct thread *td;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			/* XXXKSE this is not complete under threads */
			wproc = (struct proc *)block->lf_id;
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(wproc, td) {
				while (td->td_wchan &&
				    (td->td_wmesg == lockstr) &&
				    (i++ < maxlockdepth)) {
					waitblock = (struct lockf *)td->td_wchan;
					/* Get the owner of the blocking lock */
					waitblock = waitblock->lf_next;
					if ((waitblock->lf_flags & F_POSIX) == 0)
						break;
					wproc = (struct proc *)waitblock->lf_id;
					if (wproc == (struct proc *)lock->lf_id) {
						mtx_unlock_spin(&sched_lock);
						free(lock, M_LOCKF);
						return (EDEADLK);
					}
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
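		/*
		 * Note that this makes a flock(2) upgrade non-atomic:
		 * the shared lock is released before the exclusive one
		 * is granted, so another process may slip in between.
		 */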
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep(lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
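 *
 * An unlock need not match a held lock exactly: releasing the middle
 * of a held range (case 2 below) either shrinks the lock or splits it
 * in two via lf_split().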
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
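 *
 * This implements F_GETLK: on return the caller's flock describes
 * the first lock that would block the request (type, range and, for
 * POSIX locks, the owning pid), or has l_type set to F_UNLCK if
 * nothing blocks it.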
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
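		/*
		 * Pictorially, with the request ("lock") fixed and "lf"
		 * the list entry being tested (typical shapes):
		 *
		 *	lock:             |=======|
		 *	case 0:  |--|                  |--|
		 *	case 1:           |-------|
		 *	case 2:         |-----------|
		 *	case 3:            |-----|
		 *	case 4:       |------|
		 *	case 5:               |------|
		 */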
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
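 *
 * For example, if lock1 covers [0, 99] and lock2 covers [40, 59],
 * lock1 is trimmed to [0, 39], a new lock is created for [60, 99],
 * and lock2 is linked into the list between the two pieces.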
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy(lock1, splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in.
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist.
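 *
 * Each waiter on the list was put to sleep in lf_setlock(); waking it
 * causes the "while ((block = lf_getblock(lock)))" loop there to
 * re-evaluate whether the retried request can now be granted.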
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	if (lock->lf_inode != (struct inode *)0)
		/* XXX no %qd in kernel.  Truncate. */
		printf(" in ino %lu on dev <%d, %d>, %s, start %ld, end %ld",
		    (u_long)lock->lf_inode->i_number,
		    major(lock->lf_inode->i_dev),
		    minor(lock->lf_inode->i_dev),
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (long)lock->lf_start, (long)lock->lf_end);
	else
		printf(" %s, start %ld, end %ld",
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (long)lock->lf_start, (long)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %lu on dev <%d, %d>:\n",
	    tag, (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		/* XXX no %qd in kernel.  Truncate. */
		printf(", %s, start %ld, end %ld",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (long)lf->lf_start, (long)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			/* XXX no %qd in kernel.  Truncate. */
			printf(", %s, start %ld, end %ld",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (long)blk->lf_start,
			    (long)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */