/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lockf.c 171193 2007-07-03 21:22:58Z jeff $");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>


static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock(struct lockf *, struct lockf **);
static int	 lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *
	 lf_getblock(struct lockf *);
static int	 lf_getlock(struct lockf *, struct flock *);
static int	 lf_setlock(struct lockf *, struct vnode *, struct lockf **);
static void	 lf_split(struct lockf *, struct lockf *, struct lockf **);
static void	 lf_wakelock(struct lockf *);
#ifdef LOCKF_DEBUG
static void	 lf_print(char *, struct lockf *);
static void	 lf_printlist(char *, struct lockf *);
#endif

/*
 * Advisory record locking support
 */
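/*
 * lf_advlock() is intended to be called from a filesystem's
 * VOP_ADVLOCK() implementation, which supplies the per-file list head
 * and the current file size.  As a rough sketch only (UFS-style,
 * assuming VTOI(), i_lockf and i_size are available), a caller might
 * look like:
 *
 *	static int
 *	ufs_advlock(struct vop_advlock_args *ap)
 *	{
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 */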
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	struct vnode *vp = ap->a_vp;
	off_t start, end, oadd;
	struct lockf *split;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0)
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
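	/*
	 * For illustration, the conversion above yields, e.g.:
	 *	SEEK_SET, l_start 100, l_len 50   ->  lock [100, 149]
	 *	SEEK_SET, l_start 100, l_len 0    ->  lock [100, EOF] (end == -1)
	 *	SEEK_SET, l_start 100, l_len -50  ->  lock [50, 99]
	 *	SEEK_END, l_start 0,   l_len 0    ->  lock [size, EOF]
	 * An end of -1 means the lock extends to the largest possible
	 * offset, covering any future growth of the file.
	 */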
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Allocate a spare structure in case we have to split.
	 */
	split = NULL;
	if (ap->a_op == F_SETLK || ap->a_op == F_UNLCK)
		MALLOC(split, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	/*
	 * XXX The problem is that VTOI is UFS-specific, so it will
	 * break LOCKF_DEBUG for all filesystems other than UFS, because
	 * it casts the vnode's v_data pointer to a struct inode *.
	 */
/*	lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	VI_LOCK(vp);
	switch(ap->a_op) {
	case F_SETLK:
		error = lf_setlock(lock, vp, &split);
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &split);
		FREE(lock, M_LOCKF);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		break;

	default:
		free(lock, M_LOCKF);
		error = EINVAL;
		break;
	}
	VI_UNLOCK(vp);
	if (split)
		FREE(split, M_LOCKF);
	return (error);
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock, vp, split)
	struct lockf *lock;
	struct vnode *vp;
	struct lockf **split;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
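		/*
		 * Example of the cycle this catches: we (lock->lf_id)
		 * ask for a range held by process B while B itself is
		 * asleep waiting for a range that we hold.  Walking B's
		 * wait channel leads back to a lock owned by us, so
		 * EDEADLK is returned instead of sleeping forever.
		 */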
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct proc *nproc;
			struct thread *td;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
restart:
			nproc = NULL;
			PROC_SLOCK(wproc);
			FOREACH_THREAD_IN_PROC(wproc, td) {
				thread_lock(td);
				while (td->td_wchan &&
				    (td->td_wmesg == lockstr) &&
				    (i++ < maxlockdepth)) {
					waitblock = (struct lockf *)td->td_wchan;
					/* Get the owner of the blocking lock */
					waitblock = waitblock->lf_next;
					if ((waitblock->lf_flags & F_POSIX) == 0)
						break;
					nproc = (struct proc *)waitblock->lf_id;
					if (nproc == (struct proc *)lock->lf_id) {
						PROC_SUNLOCK(wproc);
						thread_unlock(td);
						free(lock, M_LOCKF);
						return (EDEADLK);
					}
				}
				thread_unlock(td);
			}
			PROC_SUNLOCK(wproc);
			wproc = nproc;
			if (wproc)
				goto restart;
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, split);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = msleep(lock, VI_MTX(vp), priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, split);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock, split)
	struct lockf *unlock;
	struct lockf **split;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, split);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
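		/*
		 * Pictorially, with "lock" being the requested range and
		 * "lf" the lock being examined:
		 *
		 *	lock:             |--------|
		 *	case 0:   |--|                   |--|
		 *	case 1:           |--------|
		 *	case 2:        |--------------|
		 *	case 3:             |----|
		 *	case 4:       |--------|
		 *	case 5:               |--------|
		 */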
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
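/*
 * For example, if lock1 covers [0, 99] and lock2 covers [40, 59],
 * the result is three locks: lock1 shrinks to [0, 39], lock2 keeps
 * [40, 59], and the preallocated "split" entry takes [60, 99].
 * When the two ranges share a start or an end, only two pieces
 * result and the spare entry is left untouched.
 */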
static void
lf_split(lock1, lock2, split)
	struct lockf *lock1;
	struct lockf *lock2;
	struct lockf **split;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.  We use the preallocated
	 * splitlock so we don't have to block.
	 */
	splitlock = *split;
	*split = NULL;
	bcopy(lock1, splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>, %s, start %jd, end %jd",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev),
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	else
		printf(" %s, start %jd, end %jd",
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %ju on dev <%s>:\n",
	    tag, (uintmax_t)lock->lf_inode->i_number,
	    devtoname(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */