1/*
2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1982, 1986, 1989, 1993
30 *	The Regents of the University of California.  All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Scooter Morris at Genentech Inc.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 *    notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 *    notice, this list of conditions and the following disclaimer in the
42 *    documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 *    may be used to endorse or promote products derived from this software
45 *    without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
60 */
61
62#include <sys/cdefs.h>
63#include <sys/param.h>
64#include <sys/systm.h>
65#include <sys/kernel.h>
66#include <sys/lock.h>
67#include <sys/mount.h>
68#include <sys/proc.h>
69#include <sys/signalvar.h>
70#include <sys/unistd.h>
71#include <sys/user.h>
72#include <sys/vnode.h>
73#include <sys/vnode_internal.h>
74#include <sys/vnode_if.h>
75#include <sys/malloc.h>
76#include <sys/fcntl.h>
77#include <sys/lockf.h>
78#include <sys/sdt.h>
79#include <kern/task.h>
80
81/*
82 * This variable controls the maximum number of processes that will
83 * be checked in doing deadlock detection.
84 */
85static int maxlockdepth = MAXDEPTH;
86
87#ifdef LOCKF_DEBUGGING
88#include <sys/sysctl.h>
89#include <ufs/ufs/quota.h>
90#include <ufs/ufs/inode.h>
91void lf_print(const char *tag, struct lockf *lock);
92void lf_printlist(const char *tag, struct lockf *lock);
93static int	lockf_debug = 2;
94SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, "");
95
96/*
97 * If there is no mask bit selector, or there is on, and the selector is
98 * set, then output the debugging diagnostic.
99 */
100#define LOCKF_DEBUG(mask, ...)					\
101	do {							\
102		if( !(mask) || ((mask) & lockf_debug)) {	\
103			printf(__VA_ARGS__);			\
104		}						\
105	} while(0)
106#else	/* !LOCKF_DEBUGGING */
107#define LOCKF_DEBUG(mask, ...)		/* mask */
108#endif	/* !LOCKF_DEBUGGING */
109
MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0	/* NULL lock; end-of-list sentinel */
#define SELF	0x1			/* lf_findoverlap: match locks we own */
#define OTHERS	0x2			/* lf_findoverlap: match locks owned by others */
#define OFF_MAX	0x7fffffffffffffffULL	/* max off_t */

/*
 * Overlapping lock states
 *
 * Describes how an existing lock's byte range relates to the range of a
 * lock being set/cleared; returned by lf_findoverlap() and switched on
 * by lf_setlock()/lf_clearlock().
 */
typedef enum {
	OVERLAP_NONE = 0,		/* ranges are disjoint */
	OVERLAP_EQUALS_LOCK,		/* ranges are identical */
	OVERLAP_CONTAINS_LOCK,		/* existing lock fully covers new lock */
	OVERLAP_CONTAINED_BY_LOCK,	/* new lock fully covers existing lock */
	OVERLAP_STARTS_BEFORE_LOCK,	/* partial overlap at the front */
	OVERLAP_ENDS_AFTER_LOCK		/* partial overlap at the back */
} overlap_t;

static int	 lf_clearlock(struct lockf *);
static overlap_t lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *, pid_t);
static int	 lf_getlock(struct lockf *, struct flock *, pid_t);
static int	 lf_setlock(struct lockf *, struct timespec *);
static int	 lf_split(struct lockf *, struct lockf *);
static void	 lf_wakelock(struct lockf *, boolean_t);
#if IMPORTANCE_INHERITANCE
static void	 lf_hold_assertion(task_t, struct lockf *);
static void	 lf_jump_to_queue_head(struct lockf *, struct lockf *);
static void	 lf_drop_assertion(struct lockf *);
#endif /* IMPORTANCE_INHERITANCE */
142
143/*
144 * lf_advlock
145 *
146 * Description:	Advisory record locking support
147 *
148 * Parameters:	ap			Argument pointer to a vnop_advlock_args
149 *					argument descriptor structure for the
150 *					lock operation to be attempted.
151 *
152 * Returns:	0			Success
153 *		EOVERFLOW
154 *		EINVAL
155 *		ENOLCK			Number of locked regions exceeds limit
156 *	lf_setlock:EAGAIN
157 *	lf_setlock:EDEADLK
158 *	lf_setlock:EINTR
159 *	lf_setlock:ENOLCK
160 *	lf_setlock:ETIMEDOUT
161 *	lf_clearlock:ENOLCK
162 *	vnode_size:???
163 *
164 * Notes:	We return ENOLCK when we run out of memory to support locks; as
165 *		such, there is no specific expectation limit other than the
166 *		amount of available resources.
167 */
int
lf_advlock(struct vnop_advlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct flock *fl = ap->a_fl;
	vfs_context_t context = ap->a_context;
	struct lockf *lock;
	off_t start, end, oadd;
	u_quad_t size;
	int error;
	struct lockf **head = &vp->v_lockf;

	/* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			LOCKF_DEBUG(0, "lf_advlock: '%s' unlock without lock\n", vfs_context_proc(context)->p_comm);
			return (0);
		}
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:

		/*
		 * It's OK to cast the u_quad_t to an off_t here, since they
		 * are the same storage size, and the value of the returned
		 * contents will never overflow into the sign bit.  We need to
		 * do this because we will use size to force range checks.
		 */
		if ((error = vnode_size(vp, (off_t *)&size, context))) {
			LOCKF_DEBUG(0, "lf_advlock: vnode_getattr failed: %d\n", error);
			return (error);
		}

		if (size > OFF_MAX ||
		    (fl->l_start > 0 &&
		     size > (u_quad_t)(OFF_MAX - fl->l_start)))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		LOCKF_DEBUG(0, "lf_advlock: unknown whence %d\n", fl->l_whence);
		return (EINVAL);
	}
	if (start < 0) {
		LOCKF_DEBUG(0, "lf_advlock: start < 0 (%qd)\n", start);
		return (EINVAL);
	}
	if (fl->l_len < 0) {
		/*
		 * A negative length locks the l_len bytes immediately
		 * preceding l_start; normalize to an inclusive
		 * [start, end] pair.
		 */
		if (start == 0) {
			LOCKF_DEBUG(0, "lf_advlock: len < 0 & start == 0\n");
			return (EINVAL);
		}
		end = start - 1;
		start += fl->l_len;
		if (start < 0) {
			LOCKF_DEBUG(0, "lf_advlock: start < 0 (%qd)\n", start);
			return (EINVAL);
		}
	} else if (fl->l_len == 0)
		/* Zero length means "lock to end of file"; -1 end is the EOF sentinel */
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > (off_t)(OFF_MAX - start)) {
			LOCKF_DEBUG(0, "lf_advlock: overflow\n");
			return (EOVERFLOW);
		}
		end = start + oadd;
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	if (lock == NULL)
		return (ENOLCK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	lock->lf_vnode = vp;
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
#if IMPORTANCE_INHERITANCE
	lock->lf_boosted = LF_NOT_BOOSTED;
#endif /* IMPORTANCE_INHERITANCE */

	/* flock(2) style locks may safely be woken one waiter at a time */
	if (ap->a_flags & F_FLOCK)
	        lock->lf_flags |= F_WAKE1_SAFE;

	lck_mtx_lock(&vp->v_lock);	/* protect the lockf list */
	/*
	 * Do the requested operation.  On F_SETLK the lock is consumed
	 * (linked or freed) by lf_setlock(); for all other operations it
	 * is only a query/template and is freed here.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		error = lf_setlock(lock, ap->a_timeout);
		break;

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl, -1);
		FREE(lock, M_LOCKF);
		break;


	default:
		FREE(lock, M_LOCKF);
		error = EINVAL;
		break;
	}
	lck_mtx_unlock(&vp->v_lock);	/* done manipulating the list */

	LOCKF_DEBUG(0, "lf_advlock: normal exit: %d\n\n", error);
	return (error);
}
308
309/*
310 * Empty the queue of msleeping requests for a lock on the given vnode.
311 * Called with the vnode already locked.  Used for forced unmount, where
312 * a flock(2) invoker sleeping on a blocked lock holds an iocount reference
313 * that prevents the vnode from ever being drained.  Force unmounting wins.
314 */
315void
316lf_abort_advlocks(vnode_t vp)
317{
318	struct lockf *lock;
319
320	if ((lock = vp->v_lockf) == NULL)
321		return;
322
323	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
324
325	if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
326		struct lockf *tlock;
327
328		TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
329			/*
330			 * Setting this flag should cause all
331			 * currently blocked F_SETLK request to
332			 * return to userland with an errno.
333			 */
334			tlock->lf_flags |= F_ABORT;
335		}
336		lf_wakelock(lock, TRUE);
337	}
338}
339
340/*
341 * Take any lock attempts which are currently blocked by a given lock ("from")
342 * and mark them as blocked by a different lock ("to").  Used in the case
343 * where a byte range currently occupied by "from" is to be occupied by "to."
344 */
345static void
346lf_move_blocked(struct lockf *to, struct lockf *from)
347{
348	struct lockf *tlock;
349
350	TAILQ_FOREACH(tlock, &from->lf_blkhd, lf_block) {
351		tlock->lf_next = to;
352	}
353
354	TAILQ_CONCAT(&to->lf_blkhd, &from->lf_blkhd, lf_block);
355}
356
357/*
358 * lf_coalesce_adjacent
359 *
360 * Description:	Helper function: when setting a lock, coalesce adjacent
361 *		locks.  Needed because adjacent locks are not overlapping,
362 *		but POSIX requires that they be coalesced.
363 *
364 * Parameters:	lock			The new lock which may be adjacent
365 *					to already locked regions, and which
366 *					should therefore be coalesced with them
367 *
368 * Returns:	<void>
369 */
static void
lf_coalesce_adjacent(struct lockf *lock)
{
	struct lockf **lf = lock->lf_head;

	/* Walk the whole per-vnode list via next-pointer addresses */
	while (*lf != NOLOCKF) {
		/* reject locks that obviously could not be coalesced */
		if ((*lf == lock) ||
		    ((*lf)->lf_id != lock->lf_id) ||
		    ((*lf)->lf_type != lock->lf_type)) {
			lf = &(*lf)->lf_next;
			continue;
		}

		/*
		 * NOTE: Assumes that if two locks are adjacent on the number line
		 * and belong to the same owner, then they are adjacent on the list.
		 */
		if ((*lf)->lf_end != -1 &&
		    ((*lf)->lf_end + 1) == lock->lf_start) {
			struct lockf *adjacent = *lf;

			LOCKF_DEBUG(0, "lf_coalesce_adjacent: coalesce adjacent previous\n");
			/* extend lock backwards and unlink the predecessor */
			lock->lf_start = (*lf)->lf_start;
			*lf = lock;
			lf = &(*lf)->lf_next;

			/* waiters on the absorbed lock now block on us */
			lf_move_blocked(lock, adjacent);

			FREE(adjacent, M_LOCKF);
			continue;
		}
		/* If the lock starts adjacent to us, we can coalesce it */
		if (lock->lf_end != -1 &&
		    (lock->lf_end + 1) == (*lf)->lf_start) {
			struct lockf *adjacent = *lf;

			LOCKF_DEBUG(0, "lf_coalesce_adjacent: coalesce adjacent following\n");
			/* extend lock forwards and unlink the successor */
			lock->lf_end = (*lf)->lf_end;
			lock->lf_next = (*lf)->lf_next;
			lf = &lock->lf_next;

			/* waiters on the absorbed lock now block on us */
			lf_move_blocked(lock, adjacent);

			FREE(adjacent, M_LOCKF);
			continue;
		}

		/* no matching conditions; go on to next lock */
		lf = &(*lf)->lf_next;
	}
}
422
423
424/*
425 * lf_setlock
426 *
427 * Description:	Set a byte-range lock.
428 *
429 * Parameters:	lock			The lock structure describing the lock
430 *					to be set; allocated by the caller, it
431 *					will be linked into the lock list if
432 *					the set is successful, and freed if the
433 *					set is unsuccessful.
434 *
435 *		timeout			Timeout specified in the case of
436 * 					SETLKWTIMEOUT.
437 *
438 * Returns:	0			Success
439 *		EAGAIN
440 *		EDEADLK
441 *	lf_split:ENOLCK
442 *	lf_clearlock:ENOLCK
443 *	msleep:EINTR
444 *	msleep:ETIMEDOUT
445 *
446 * Notes:	We add the lock to the provisional lock list.  We do not
447 *		coalesce at this time; this has implications for other lock
448 *		requestors in the blocker search mechanism.
449 */
450static int
451lf_setlock(struct lockf *lock, struct timespec *timeout)
452{
453	struct lockf *block;
454	struct lockf **head = lock->lf_head;
455	struct lockf **prev, *overlap, *ltmp;
456	static char lockstr[] = "lockf";
457	int priority, needtolink, error;
458	struct vnode *vp = lock->lf_vnode;
459	overlap_t ovcase;
460#if IMPORTANCE_INHERITANCE
461	task_t boosting_task, block_task;
462#endif /* IMPORTANCE_INHERITANCE */
463
464#ifdef LOCKF_DEBUGGING
465	if (lockf_debug & 1) {
466		lf_print("lf_setlock", lock);
467		lf_printlist("lf_setlock(in)", lock);
468	}
469#endif /* LOCKF_DEBUGGING */
470
471	/*
472	 * Set the priority
473	 */
474	priority = PLOCK;
475	if (lock->lf_type == F_WRLCK)
476		priority += 4;
477	priority |= PCATCH;
478	/*
479	 * Scan lock list for this file looking for locks that would block us.
480	 */
481	while ((block = lf_getblock(lock, -1))) {
482		/*
483		 * Free the structure and return if nonblocking.
484		 */
485		if ((lock->lf_flags & F_WAIT) == 0) {
486			DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
487			FREE(lock, M_LOCKF);
488			return (EAGAIN);
489		}
490
491		/*
492		 * We are blocked. Since flock style locks cover
493		 * the whole file, there is no chance for deadlock.
494		 * For byte-range locks we must check for deadlock.
495		 *
496		 * Deadlock detection is done by looking through the
497		 * wait channels to see if there are any cycles that
498		 * involve us. MAXDEPTH is set just to make sure we
499		 * do not go off into neverland.
500		 */
501		if ((lock->lf_flags & F_POSIX) &&
502		    (block->lf_flags & F_POSIX)) {
503			struct proc *wproc, *bproc;
504			struct uthread *ut;
505			struct lockf *waitblock;
506			int i = 0;
507
508			/* The block is waiting on something */
509			wproc = (struct proc *)block->lf_id;
510			proc_lock(wproc);
511			TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
512				/*
513				 * While the thread is asleep (uu_wchan != 0)
514				 * in this code (uu_wmesg == lockstr)
515				 * and we have not exceeded the maximum cycle
516				 * depth (i < maxlockdepth), then check for a
517				 * cycle to see if the lock is blocked behind
518				 * someone blocked behind us.
519				 */
520				while (((waitblock = (struct lockf *)ut->uu_wchan) != NULL) &&
521				    ut->uu_wmesg == lockstr &&
522				    (i++ < maxlockdepth)) {
523					waitblock = (struct lockf *)ut->uu_wchan;
524					/*
525					 * Get the lock blocking the lock
526					 * which would block us, and make
527					 * certain it hasn't come unblocked
528					 * (been granted, e.g. between the time
529					 * we called lf_getblock, and the time
530					 * we successfully acquired the
531					 * proc_lock).
532					 */
533					waitblock = waitblock->lf_next;
534					if (waitblock == NULL)
535						break;
536
537					/*
538					 * Make sure it's an advisory range
539					 * lock and not an overall file lock;
540					 * if we mix lock types, it's our own
541					 * fault.
542					 */
543					if ((waitblock->lf_flags & F_POSIX) == 0)
544						break;
545
546					/*
547					 * If the owner of the lock that's
548					 * blocking a lock that's blocking us
549					 * getting the requested lock, then we
550					 * would deadlock, so error out.
551					 */
552					bproc = (struct proc *)waitblock->lf_id;
553					if (bproc == (struct proc *)lock->lf_id) {
554						proc_unlock(wproc);
555						FREE(lock, M_LOCKF);
556						return (EDEADLK);
557					}
558				}
559			}
560			proc_unlock(wproc);
561		}
562
563		/*
564		 * For flock type locks, we must first remove
565		 * any shared locks that we hold before we sleep
566		 * waiting for an exclusive lock.
567		 */
568		if ((lock->lf_flags & F_FLOCK) &&
569		    lock->lf_type == F_WRLCK) {
570			lock->lf_type = F_UNLCK;
571			if ((error = lf_clearlock(lock)) != 0) {
572				FREE(lock, M_LOCKF);
573				return (error);
574			}
575			lock->lf_type = F_WRLCK;
576		}
577		/*
578		 * Add our lock to the blocked list and sleep until we're free.
579		 * Remember who blocked us (for deadlock detection).
580		 */
581		lock->lf_next = block;
582		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
583
584		if ( !(lock->lf_flags & F_FLOCK))
585		        block->lf_flags &= ~F_WAKE1_SAFE;
586
587#ifdef LOCKF_DEBUGGING
588		if (lockf_debug & 1) {
589			lf_print("lf_setlock: blocking on", block);
590			lf_printlist("lf_setlock(block)", block);
591		}
592#endif /* LOCKF_DEBUGGING */
593		DTRACE_FSINFO(advlock__wait, vnode_t, vp);
594#if IMPORTANCE_INHERITANCE
595		/*
596		 * Posix type of locks are not inherited by child processes and
597		 * it maintains one to one mapping between lock and its owner, while
598		 * Flock type of locks are inherited across forks and it does not
599		 * maintian any one to one mapping between the lock and the lock
600		 * owner. Thus importance donation is done only for Posix type of
601		 * locks.
602		 */
603		if ((lock->lf_flags & F_POSIX) && (block->lf_flags & F_POSIX)) {
604			block_task = proc_task((proc_t) block->lf_id);
605			boosting_task = proc_task((proc_t) lock->lf_id);
606
607			/* Check if current task can donate importance. The
608			 * check of imp_donor bit is done without holding
609			 * any lock. The value may change after you read it,
610			 * but it is ok to boost a task while someone else is
611			 * unboosting you.
612			 *
613			 * TODO: Support live inheritance on file locks.
614			 */
615			if (task_is_importance_donor(boosting_task)) {
616				if (block->lf_boosted != LF_BOOSTED &&
617				    task_is_importance_receiver_type(block_task)) {
618					lf_hold_assertion(block_task, block);
619				}
620				lf_jump_to_queue_head(block, lock);
621			}
622		}
623#endif /* IMPORTANCE_INHERITANCE */
624		error = msleep(lock, &vp->v_lock, priority, lockstr, timeout);
625
626		if (error == 0 && (lock->lf_flags & F_ABORT) != 0)
627			error = EBADF;
628
629		if (lock->lf_next) {
630			/*
631			 * lf_wakelock() always sets wakelock->lf_next to
632			 * NULL before a wakeup; so we've been woken early
633			 * - perhaps by a debugger, signal or other event.
634			 *
635			 * Remove 'lock' from the block list (avoids double-add
636			 * in the spurious case, which would create a cycle)
637			 */
638			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
639			lock->lf_next = NULL;
640
641			if (error == 0) {
642				/*
643				 * If this was a spurious wakeup, retry
644				 */
645				printf("%s: spurious wakeup, retrying lock\n",
646				    __func__);
647				continue;
648			}
649		}
650
651		if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
652		        if ((block = lf_getblock(lock, -1)) != NULL)
653				lf_move_blocked(block, lock);
654		}
655
656		if (error) {
657			if (!TAILQ_EMPTY(&lock->lf_blkhd))
658			        lf_wakelock(lock, TRUE);
659			FREE(lock, M_LOCKF);
660			/* Return ETIMEDOUT if timeout occoured. */
661			if (error == EWOULDBLOCK) {
662				error = ETIMEDOUT;
663			}
664			return (error);
665		}
666	}
667
668	/*
669	 * No blocks!!  Add the lock.  Note that we will
670	 * downgrade or upgrade any overlapping locks this
671	 * process already owns.
672	 *
673	 * Skip over locks owned by other processes.
674	 * Handle any locks that overlap and are owned by ourselves.
675	 */
676	prev = head;
677	block = *head;
678	needtolink = 1;
679	for (;;) {
680		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
681		if (ovcase)
682			block = overlap->lf_next;
683		/*
684		 * Six cases:
685		 *	0) no overlap
686		 *	1) overlap == lock
687		 *	2) overlap contains lock
688		 *	3) lock contains overlap
689		 *	4) overlap starts before lock
690		 *	5) overlap ends after lock
691		 */
692		switch (ovcase) {
693		case OVERLAP_NONE:
694			if (needtolink) {
695				*prev = lock;
696				lock->lf_next = overlap;
697			}
698			break;
699
700		case OVERLAP_EQUALS_LOCK:
701			/*
702			 * If downgrading lock, others may be
703			 * able to acquire it.
704			 */
705			if (lock->lf_type == F_RDLCK &&
706			    overlap->lf_type == F_WRLCK)
707			        lf_wakelock(overlap, TRUE);
708			overlap->lf_type = lock->lf_type;
709			FREE(lock, M_LOCKF);
710			lock = overlap; /* for lf_coalesce_adjacent() */
711			break;
712
713		case OVERLAP_CONTAINS_LOCK:
714			/*
715			 * Check for common starting point and different types.
716			 */
717			if (overlap->lf_type == lock->lf_type) {
718				FREE(lock, M_LOCKF);
719				lock = overlap; /* for lf_coalesce_adjacent() */
720				break;
721			}
722			if (overlap->lf_start == lock->lf_start) {
723				*prev = lock;
724				lock->lf_next = overlap;
725				overlap->lf_start = lock->lf_end + 1;
726			} else {
727				/*
728				 * If we can't split the lock, we can't
729				 * grant it.  Claim a system limit for the
730				 * resource shortage.
731				 */
732				if (lf_split(overlap, lock)) {
733					FREE(lock, M_LOCKF);
734					return (ENOLCK);
735				}
736			}
737			lf_wakelock(overlap, TRUE);
738			break;
739
740		case OVERLAP_CONTAINED_BY_LOCK:
741			/*
742			 * If downgrading lock, others may be able to
743			 * acquire it, otherwise take the list.
744			 */
745			if (lock->lf_type == F_RDLCK &&
746			    overlap->lf_type == F_WRLCK) {
747			        lf_wakelock(overlap, TRUE);
748			} else {
749				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
750					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
751					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
752					    lf_block);
753					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
754					    ltmp, lf_block);
755					ltmp->lf_next = lock;
756				}
757			}
758			/*
759			 * Add the new lock if necessary and delete the overlap.
760			 */
761			if (needtolink) {
762				*prev = lock;
763				lock->lf_next = overlap->lf_next;
764				prev = &lock->lf_next;
765				needtolink = 0;
766			} else
767				*prev = overlap->lf_next;
768			FREE(overlap, M_LOCKF);
769			continue;
770
771		case OVERLAP_STARTS_BEFORE_LOCK:
772			/*
773			 * Add lock after overlap on the list.
774			 */
775			lock->lf_next = overlap->lf_next;
776			overlap->lf_next = lock;
777			overlap->lf_end = lock->lf_start - 1;
778			prev = &lock->lf_next;
779			lf_wakelock(overlap, TRUE);
780			needtolink = 0;
781			continue;
782
783		case OVERLAP_ENDS_AFTER_LOCK:
784			/*
785			 * Add the new lock before overlap.
786			 */
787			if (needtolink) {
788				*prev = lock;
789				lock->lf_next = overlap;
790			}
791			overlap->lf_start = lock->lf_end + 1;
792			lf_wakelock(overlap, TRUE);
793			break;
794		}
795		break;
796	}
797	/* Coalesce adjacent locks with identical attributes */
798	lf_coalesce_adjacent(lock);
799#ifdef LOCKF_DEBUGGING
800	if (lockf_debug & 1) {
801		lf_print("lf_setlock: got the lock", lock);
802		lf_printlist("lf_setlock(out)", lock);
803	}
804#endif /* LOCKF_DEBUGGING */
805	return (0);
806}
807
808
809/*
810 * lf_clearlock
811 *
812 * Description:	Remove a byte-range lock on an vnode.  Generally, find the
813 *		lock (or an overlap to that lock) and remove it (or shrink
814 *		it), then wakeup anyone we can.
815 *
816 * Parameters:	unlock			The lock to clear
817 *
818 * Returns:	0			Success
819 *	lf_split:ENOLCK
820 *
821 * Notes:	A caller may unlock all the locks owned by the caller by
822 *		specifying the entire file range; locks owned by other
823 *		callers are not effected by this operation.
824 */
825static int
826lf_clearlock(struct lockf *unlock)
827{
828	struct lockf **head = unlock->lf_head;
829	struct lockf *lf = *head;
830	struct lockf *overlap, **prev;
831	overlap_t ovcase;
832
833	if (lf == NOLOCKF)
834		return (0);
835#ifdef LOCKF_DEBUGGING
836	if (unlock->lf_type != F_UNLCK)
837		panic("lf_clearlock: bad type");
838	if (lockf_debug & 1)
839		lf_print("lf_clearlock", unlock);
840#endif /* LOCKF_DEBUGGING */
841	prev = head;
842	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
843		/*
844		 * Wakeup the list of locks to be retried.
845		 */
846	        lf_wakelock(overlap, FALSE);
847#if IMPORTANCE_INHERITANCE
848		if (overlap->lf_boosted == LF_BOOSTED) {
849			lf_drop_assertion(overlap);
850		}
851#endif /* IMPORTANCE_INHERITANCE */
852
853		switch (ovcase) {
854		case OVERLAP_NONE:	/* satisfy compiler enum/switch */
855			break;
856
857		case OVERLAP_EQUALS_LOCK:
858			*prev = overlap->lf_next;
859			FREE(overlap, M_LOCKF);
860			break;
861
862		case OVERLAP_CONTAINS_LOCK: /* split it */
863			if (overlap->lf_start == unlock->lf_start) {
864				overlap->lf_start = unlock->lf_end + 1;
865				break;
866			}
867			/*
868			 * If we can't split the lock, we can't grant it.
869			 * Claim a system limit for the resource shortage.
870			 */
871			if (lf_split(overlap, unlock))
872				return (ENOLCK);
873			overlap->lf_next = unlock->lf_next;
874			break;
875
876		case OVERLAP_CONTAINED_BY_LOCK:
877			*prev = overlap->lf_next;
878			lf = overlap->lf_next;
879			FREE(overlap, M_LOCKF);
880			continue;
881
882		case OVERLAP_STARTS_BEFORE_LOCK:
883			overlap->lf_end = unlock->lf_start - 1;
884			prev = &overlap->lf_next;
885			lf = overlap->lf_next;
886			continue;
887
888		case OVERLAP_ENDS_AFTER_LOCK:
889			overlap->lf_start = unlock->lf_end + 1;
890			break;
891		}
892		break;
893	}
894#ifdef LOCKF_DEBUGGING
895	if (lockf_debug & 1)
896		lf_printlist("lf_clearlock", unlock);
897#endif /* LOCKF_DEBUGGING */
898	return (0);
899}
900
901
902/*
903 * lf_getlock
904 *
905 * Description:	Check whether there is a blocking lock, and if so return
906 *		its process identifier into the lock being requested.
907 *
908 * Parameters:	lock			Pointer to lock to test for blocks
909 *		fl			Pointer to flock structure to receive
910 *					the blocking lock information, if a
911 *					blocking lock is found.
912 *		matchpid		-1, or pid value to match in lookup.
913 *
914 * Returns:	0			Success
915 *
916 * Implicit Returns:
917 *		*fl			Contents modified to reflect the
918 *					blocking lock, if one is found; not
919 *					modified otherwise
920 *
921 * Notes:	fl->l_pid will be (-1) for file locks and will only be set to
922 *		the blocking process ID for advisory record locks.
923 */
924static int
925lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid)
926{
927	struct lockf *block;
928
929#ifdef LOCKF_DEBUGGING
930	if (lockf_debug & 1)
931		lf_print("lf_getlock", lock);
932#endif /* LOCKF_DEBUGGING */
933
934	if ((block = lf_getblock(lock, matchpid))) {
935		fl->l_type = block->lf_type;
936		fl->l_whence = SEEK_SET;
937		fl->l_start = block->lf_start;
938		if (block->lf_end == -1)
939			fl->l_len = 0;
940		else
941			fl->l_len = block->lf_end - block->lf_start + 1;
942		if (block->lf_flags & F_POSIX)
943			fl->l_pid = proc_pid((struct proc *)(block->lf_id));
944		else
945			fl->l_pid = -1;
946	} else {
947		fl->l_type = F_UNLCK;
948	}
949	return (0);
950}
951
952/*
953 * lf_getblock
954 *
955 * Description:	Walk the list of locks for an inode and return the first
956 *		blocking lock.  A lock is considered blocking if we are not
957 *		the lock owner; otherwise, we are permitted to upgrade or
958 *		downgrade it, and it's not considered blocking.
959 *
960 * Parameters:	lock			The lock for which we are interested
961 *					in obtaining the blocking lock, if any
962 *		matchpid		-1, or pid value to match in lookup.
963 *
964 * Returns:	NOLOCKF			No blocking lock exists
965 *		!NOLOCKF		The address of the blocking lock's
966 *					struct lockf.
967 */
968static struct lockf *
969lf_getblock(struct lockf *lock, pid_t matchpid)
970{
971	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
972
973	for (prev = lock->lf_head;
974	    lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != OVERLAP_NONE;
975	    lf = overlap->lf_next) {
976		/*
977		 * Found an overlap.
978		 *
979		 * If we're matching pids, and it's a record lock,
980		 * but the pid doesn't match, then keep on looking ..
981		 */
982		if (matchpid != -1 &&
983		    (overlap->lf_flags & F_POSIX) != 0 &&
984		    proc_pid((struct proc *)(overlap->lf_id)) != matchpid)
985			continue;
986		/*
987		 * does it block us?
988		 */
989		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
990			return (overlap);
991	}
992	return (NOLOCKF);
993}
994
995
996/*
997 * lf_findoverlap
998 *
999 * Description:	Walk the list of locks to find an overlapping lock (if any).
1000 *
1001 * Parameters:	lf			First lock on lock list
1002 *		lock			The lock we are checking for an overlap
1003 *		check			Check type
1004 *		prev			pointer to pointer pointer to contain
1005 *					address of pointer to previous lock
1006 *					pointer to overlapping lock, if overlap
1007 *		overlap			pointer to pointer to contain address
1008 *					of overlapping lock
1009 *
1010 * Returns:	OVERLAP_NONE
1011 *		OVERLAP_EQUALS_LOCK
1012 *		OVERLAP_CONTAINS_LOCK
1013 *		OVERLAP_CONTAINED_BY_LOCK
1014 *		OVERLAP_STARTS_BEFORE_LOCK
1015 *		OVERLAP_ENDS_AFTER_LOCK
1016 *
1017 * Implicit Returns:
1018 *		*prev			The address of the next pointer in the
1019 *					lock previous to the overlapping lock;
1020 *					this is generally used to relink the
1021 *					lock list, avoiding a second iteration.
1022 *		*overlap		The pointer to the overlapping lock
1023 *					itself; this is used to return data in
1024 *					the check == OTHERS case, and for the
1025 *					caller to modify the overlapping lock,
1026 *					in the check == SELF case
1027 *
 * Note:	This returns only the FIRST overlapping lock.  There may be
 *		more than one.  lf_getlock will return the first blocking lock,
 *		while lf_setlock will iterate over all overlapping locks to
 *		perform the requested operation on each of them in turn.
 *
1032 *		The check parameter can be SELF, meaning we are looking for
1033 *		overlapping locks owned by us, or it can be OTHERS, meaning
1034 *		we are looking for overlapping locks owned by someone else so
1035 *		we can report a blocking lock on an F_GETLK request.
1036 *
1037 *		The value of *overlap and *prev are modified, even if there is
1038 *		no overlapping lock found; always check the return code.
1039 */
static overlap_t
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
	       struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;
	int found_self = 0;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);	/* 0 == OVERLAP_NONE */
#ifdef LOCKF_DEBUGGING
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUGGING */
	/*
	 * Throughout this function an lf_end of -1 is treated as "no upper
	 * bound" (the lock extends through end of file); every comparison
	 * against an end offset checks for -1 first.
	 */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		/* Skip locks whose ownership doesn't match the check type. */
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			/*
			 * Locks belonging to one process are adjacent on the
			 * list, so if we've found any locks belonging to us,
			 * and we're now seeing something else, then we've
			 * examined all "self" locks.  Note that bailing out
			 * here is quite important; for coalescing, we assume
			 * numerically adjacent locks from the same owner to
			 * be adjacent on the list.
			 */
			if ((type & SELF) && found_self) {
				return OVERLAP_NONE;
			}

			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}

		if ((type & SELF)) {
			found_self = 1;
		}

#ifdef LOCKF_DEBUGGING
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUGGING */
		/*
		 * OK, check for overlap
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0: the ranges are disjoint; keep walking. */
			LOCKF_DEBUG(2, "no overlap\n");

			/*
			 * NOTE: assumes that locks for the same process are
			 * nonintersecting and ordered.
			 */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (OVERLAP_NONE);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Case 1: the two ranges are identical. */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			LOCKF_DEBUG(2, "overlap == lock\n");
			return (OVERLAP_EQUALS_LOCK);
		}
		/* Case 2: the existing lock wholly contains the new range. */
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(2, "overlap contains lock\n");
			return (OVERLAP_CONTAINS_LOCK);
		}
		/* Case 3: the new range wholly contains the existing lock. */
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			LOCKF_DEBUG(2, "lock contains overlap\n");
			return (OVERLAP_CONTAINED_BY_LOCK);
		}
		/*
		 * Case 4: the existing lock starts before the new range and
		 * overlaps its beginning.
		 */
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(2, "overlap starts before lock\n");
			return (OVERLAP_STARTS_BEFORE_LOCK);
		}
		/*
		 * Case 5: the existing lock starts inside the new range and
		 * extends beyond its end.
		 */
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(2, "overlap ends after lock\n");
			return (OVERLAP_ENDS_AFTER_LOCK);
		}
		/* Cases 0-5 are exhaustive; anything else is a logic error. */
		panic("lf_findoverlap: default");
	}
	return (OVERLAP_NONE);
}
1134
1135
1136/*
1137 * lf_split
1138 *
1139 * Description:	Split a lock and a contained region into two or three locks
1140 *		as necessary.
1141 *
1142 * Parameters:	lock1			Lock to split
1143 *		lock2			Overlapping lock region requiring the
1144 *					split (upgrade/downgrade/unlock)
1145 *
1146 * Returns:	0			Success
1147 *		ENOLCK			No memory for new lock
1148 *
1149 * Implicit Returns:
1150 *		*lock1			Modified original lock
1151 *		*lock2			Overlapping lock (inserted into list)
1152 *		(new lock)		Potential new lock inserted into list
1153 *					if split results in 3 locks
1154 *
1155 * Notes:	This operation can only fail if the split would result in three
1156 *		locks, and there is insufficient memory to allocate the third
1157 *		lock; in that case, neither of the locks will be modified.
1158 */
1159static int
1160lf_split(struct lockf *lock1, struct lockf *lock2)
1161{
1162	struct lockf *splitlock;
1163
1164#ifdef LOCKF_DEBUGGING
1165	if (lockf_debug & 2) {
1166		lf_print("lf_split", lock1);
1167		lf_print("splitting from", lock2);
1168	}
1169#endif /* LOCKF_DEBUGGING */
1170	/*
1171	 * Check to see if spliting into only two pieces.
1172	 */
1173	if (lock1->lf_start == lock2->lf_start) {
1174		lock1->lf_start = lock2->lf_end + 1;
1175		lock2->lf_next = lock1;
1176		return (0);
1177	}
1178	if (lock1->lf_end == lock2->lf_end) {
1179		lock1->lf_end = lock2->lf_start - 1;
1180		lock2->lf_next = lock1->lf_next;
1181		lock1->lf_next = lock2;
1182		return (0);
1183	}
1184	/*
1185	 * Make a new lock consisting of the last part of
1186	 * the encompassing lock
1187	 */
1188	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
1189	if (splitlock == NULL)
1190		return (ENOLCK);
1191	bcopy(lock1, splitlock, sizeof *splitlock);
1192	splitlock->lf_start = lock2->lf_end + 1;
1193	TAILQ_INIT(&splitlock->lf_blkhd);
1194	lock1->lf_end = lock2->lf_start - 1;
1195	/*
1196	 * OK, now link it in
1197	 */
1198	splitlock->lf_next = lock1->lf_next;
1199	lock2->lf_next = splitlock;
1200	lock1->lf_next = lock2;
1201
1202	return (0);
1203}
1204
1205
1206/*
1207 * lf_wakelock
1208 *
1209 * Wakeup a blocklist in the case of a downgrade or unlock, since others
1210 * waiting on the lock may now be able to acquire it.
1211 *
1212 * Parameters:	listhead		Lock list head on which waiters may
1213 *					have pending locks
1214 *
1215 * Returns:	<void>
1216 *
1217 * Notes:	This function iterates a list of locks and wakes all waiters,
1218 *		rather than only waiters for the contended regions.  Because
1219 *		of this, for heavily contended files, this can result in a
1220 *		"thundering herd" situation.  Refactoring the code could make
1221 *		this operation more efficient, if heavy contention ever results
1222 *		in a real-world performance problem.
1223 */
static void
lf_wakelock(struct lockf *listhead, boolean_t force_all)
{
	struct lockf *wakelock;
	boolean_t wake_all = TRUE;

	/*
	 * When the lock is marked F_WAKE1_SAFE, waking only the first
	 * waiter (and handing the rest off to it) is sufficient, unless
	 * the caller explicitly forces a full wakeup.
	 */
	if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE))
	        wake_all = FALSE;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);

		/* The waiter is no longer blocked on anything. */
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUGGING
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUGGING */
		if (wake_all == FALSE) {
			/*
			 * If there are items on the list head block list,
			 * move them to the wakelock list instead, and then
			 * correct their lf_next pointers.
			 */
			if (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
				TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);

			        struct lockf *tlock;

			        TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
					if (TAILQ_NEXT(tlock, lf_block) == tlock) {
						/* See rdar://10887303 */
						panic("cycle in wakelock list");
					}
					/* Remaining waiters now block on the lock we wake. */
				        tlock->lf_next = wakelock;
				}
			}
		}
		wakeup(wakelock);

		/* Wake-one mode: the single wakeup above is all we do. */
		if (wake_all == FALSE)
		        break;
	}
}
1268
1269
1270#ifdef LOCKF_DEBUGGING
1271/*
1272 * lf_print DEBUG
1273 *
1274 * Print out a lock; lock information is prefixed by the string in 'tag'
1275 *
1276 * Parameters:	tag			A string tag for debugging
1277 *		lock			The lock whose information should be
1278 *					displayed
1279 *
1280 * Returns:	<void>
1281 */
1282void
1283lf_print(const char *tag, struct lockf *lock)
1284{
1285	printf("%s: lock %p for ", tag, (void *)lock);
1286	if (lock->lf_flags & F_POSIX)
1287		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
1288	else
1289		printf("id %p", (void *)lock->lf_id);
1290	if (lock->lf_vnode != 0)
1291		printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
1292		    lock->lf_vnode,
1293		    lock->lf_type == F_RDLCK ? "shared" :
1294		    lock->lf_type == F_WRLCK ? "exclusive" :
1295		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1296		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
1297	else
1298		printf(" %s, start 0x%016llx, end 0x%016llx",
1299		    lock->lf_type == F_RDLCK ? "shared" :
1300		    lock->lf_type == F_WRLCK ? "exclusive" :
1301		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1302		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
1303	if (!TAILQ_EMPTY(&lock->lf_blkhd))
1304		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
1305	else
1306		printf("\n");
1307}
1308
1309
1310/*
1311 * lf_printlist DEBUG
1312 *
1313 * Print out a lock list for the vnode associated with 'lock'; lock information
1314 * is prefixed by the string in 'tag'
1315 *
1316 * Parameters:	tag			A string tag for debugging
1317 *		lock			The lock whose vnode's lock list should
1318 *					be displayed
1319 *
1320 * Returns:	<void>
1321 */
1322void
1323lf_printlist(const char *tag, struct lockf *lock)
1324{
1325	struct lockf *lf, *blk;
1326
1327	if (lock->lf_vnode == 0)
1328		return;
1329
1330	printf("%s: Lock list for vno %p:\n",
1331	    tag, lock->lf_vnode);
1332	for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
1333		printf("\tlock %p for ",(void *)lf);
1334		if (lf->lf_flags & F_POSIX)
1335			printf("proc %ld",
1336			    (long)((struct proc *)lf->lf_id)->p_pid);
1337		else
1338			printf("id %p", (void *)lf->lf_id);
1339		printf(", %s, start 0x%016llx, end 0x%016llx",
1340		    lf->lf_type == F_RDLCK ? "shared" :
1341		    lf->lf_type == F_WRLCK ? "exclusive" :
1342		    lf->lf_type == F_UNLCK ? "unlock" :
1343		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
1344		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
1345			printf("\n\t\tlock request %p for ", (void *)blk);
1346			if (blk->lf_flags & F_POSIX)
1347				printf("proc %ld",
1348				    (long)((struct proc *)blk->lf_id)->p_pid);
1349			else
1350				printf("id %p", (void *)blk->lf_id);
1351			printf(", %s, start 0x%016llx, end 0x%016llx",
1352			    blk->lf_type == F_RDLCK ? "shared" :
1353			    blk->lf_type == F_WRLCK ? "exclusive" :
1354			    blk->lf_type == F_UNLCK ? "unlock" :
1355			    "unknown", (intmax_t)blk->lf_start,
1356			    (intmax_t)blk->lf_end);
1357			if (!TAILQ_EMPTY(&blk->lf_blkhd))
1358				panic("lf_printlist: bad list");
1359		}
1360		printf("\n");
1361	}
1362}
1363#endif /* LOCKF_DEBUGGING */
1364
1365#if IMPORTANCE_INHERITANCE
1366
1367/*
1368 * lf_hold_assertion
1369 *
1370 * Call task importance hold assertion on the owner of the lock.
1371 *
1372 * Parameters: block_task               Owner of the lock blocking
1373 *                                      current thread.
1374 *
1375 *             block                    lock on which the current thread
1376 *                                      is blocking on.
1377 *
1378 * Returns:    <void>
1379 *
1380 * Notes: The task reference on block_task is not needed to be hold since
1381 *        the current thread has vnode lock and block_task has a file
1382 *        lock, thus removing file lock in exit requires block_task to
1383 *        grab the vnode lock.
1384 */
1385static void
1386lf_hold_assertion(task_t block_task, struct lockf *block)
1387{
1388	if (task_importance_hold_file_lock_assertion(block_task, 1)) {
1389		block->lf_boosted = LF_BOOSTED;
1390	}
1391}
1392
1393
1394/*
1395 * lf_jump_to_queue_head
1396 *
1397 * Jump the lock from the tail of the block queue to the head of
1398 * the queue.
1399 *
1400 * Parameters: block                    lockf struct containing the
1401 *                                      block queue.
1402 *             lock                     lockf struct to be jumped to the
1403 *                                      front.
1404 *
1405 * Returns:    <void>
1406 */
static void
lf_jump_to_queue_head(struct lockf *block, struct lockf *lock)
{
	/* 'lock' must already be on block's blocked-request queue. */
	/* Move the lock to the head of the block queue. */
	TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block);
	TAILQ_INSERT_HEAD(&block->lf_blkhd, lock, lf_block);
}
1414
1415
1416/*
1417 * lf_drop_assertion
1418 *
1419 * Drops the task hold assertion.
1420 *
1421 * Parameters: block                    lockf struct holding the assertion.
1422 *
1423 * Returns:    <void>
1424 */
1425static void
1426lf_drop_assertion(struct lockf *block)
1427{
1428	task_t current_task;
1429
1430	current_task = proc_task((proc_t) block->lf_id);
1431	task_importance_drop_file_lock_assertion(current_task, 1);
1432	block->lf_boosted = LF_NOT_BOOSTED;
1433}
1434
1435#endif /* IMPORTANCE_INHERITANCE */
1436