/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: head/sys/kern/vfs_vnops.c 74437 2001-03-19 05:44:15Z rwatson $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>

static int vn_closefile __P((struct file *fp, struct proc *p));
static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
		struct proc *p));
static int vn_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct proc *p));
static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
		struct proc *p));
static int vn_kqfilter __P((struct file *fp, struct knote *kn));
static int vn_statfile __P((struct file *fp, struct stat *sb, struct proc *p));
static int vn_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct proc *p));

struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open(ndp, flagp, cmode)
	register struct nameidata *ndp;
	int *flagp, cmode;
{
	struct vnode *vp;
	struct mount *mp;
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error)
				goto bad;
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, p);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
		if ((error = vfs_object_create(vp, p, cred)) != 0)
			goto bad;
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	return (error);
}
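
/*
 * Example (sketch, not part of the original file): a typical in-kernel
 * caller drives vn_open() through a nameidata set up with NDINIT() and
 * is then responsible for the pathname buffer, the vnode lock, and the
 * eventual vn_close().  The path and mode below are illustrative only:
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	flags = FREAD | FWRITE | O_CREAT;
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/var/tmp/example", p);
 *	if ((error = vn_open(&nd, &flags, 0600)) == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		VOP_UNLOCK(nd.ni_vp, 0, p);
 *		... do I/O on nd.ni_vp, e.g. with vn_rdwr() ...
 *		error = vn_close(nd.ni_vp, flags, p->p_ucred, p);
 *	}
 */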

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * If there is shared text associated with
	 * the vnode, we cannot allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, cred, p)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, p);
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount >= 127)
			fp->f_seqcount = 127;
		return (fp->f_seqcount << 16);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
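
/*
 * Worked example (illustrative, assuming the default BKVASIZE of 16K):
 * a 64K request that continues at f_nextoff adds 4 to f_seqcount, so
 * the caller's ioflag carries the saturating (<= 127) sequential hint
 * in its high bits, where VOP_READ/VOP_WRITE implementations can use
 * it to size read-ahead and clustering.
 */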

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct proc *p;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE &&
		    vp->v_type != VCHR &&
		    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_procp = p;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, p);
	}
	return (error);
}
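
/*
 * Example (sketch, not part of the original file): reading the first
 * 512 bytes of an unlocked vnode into a kernel buffer.  Passing a NULL
 * aresid would instead turn a short transfer into EIO, as above:
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
 */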

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct proc *p;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_procp == p, ("uio_procp %p is not p %p",
	    uio->uio_procp, p));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	VOP_LEASE(vp, p, cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, p);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct proc *p;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	KASSERT(uio->uio_procp == p, ("uio_procp %p is not p %p",
	    uio->uio_procp, p));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	VOP_LEASE(vp, p, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, p);
	vn_finished_write(mp);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, p)
	struct file *fp;
	struct stat *sb;
	struct proc *p;
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, p);
}

int
vn_stat(vp, sb, p)
	struct vnode *vp;
	register struct stat *sb;
	struct proc *p;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, p->p_ucred, p);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare[0] = 0;
	sb->st_qspare[1] = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to zero to catch bogus uses of this field.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = 0;
	}

	sb->st_flags = vap->va_flags;
	if (suser_xxx(p->p_ucred, 0, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, p)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct proc *p;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {

			/* Do nothing if reassigning same control tty */
			if (p->p_session->s_ttyvp == vp)
				return (0);

			/* Get rid of reference to old control tty */
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);

			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, cred, p)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct proc *p;
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, p));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(vp, flags, p)
#else
debug_vn_lock(vp, flags, p, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct proc *p;
#ifdef	DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			mtx_lock(&vp->v_interlock);
		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curproc) {
			vp->v_flag |= VXWANT;
			mtx_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
			error = ENOENT;
		} else {
			if (vp->v_vxproc != NULL)
				printf("VXLOCK interlock avoided in vn_lock\n");
#ifdef	DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
				    flags | LK_NOPAUSE | LK_INTERLOCK, p);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}
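
/*
 * Example (sketch): the usual pairing with VOP_UNLOCK().  Without
 * LK_RETRY, vn_lock() fails with ENOENT when the vnode is being
 * revoked (VXLOCK); with LK_RETRY, the attempt is retried until a
 * lock is obtained:
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE, p) == 0) {
 *		... operate on the locked vnode ...
 *		VOP_UNLOCK(vp, 0, p);
 *	}
 */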

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, p)
	struct file *fp;
	struct proc *p;
{

	fp->f_ops = &badfileops;
	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, p));
}

/*
 * Prepare to start a filesystem write operation. If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed. If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}
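
/*
 * Example (sketch): the bracketing protocol used by vn_write() above.
 * Every write sequence is wrapped so that a suspension request can
 * drain in-progress writers before declaring the filesystem quiet:
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
 *	error = VOP_WRITE(vp, uio, ioflag, cred);
 *	VOP_UNLOCK(vp, 0, p);
 *	vn_finished_write(mp);
 */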

/*
 * Secondary suspension. Used by operations such as vop_inactive
 * routines that are needed by the higher level functions. These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero). At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed. If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
 */
void
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct proc *p = curproc;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return;
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p);
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}
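
/*
 * Example (sketch): a consumer such as a snapshot or unmount path can
 * quiesce all writers around a critical section; the middle step is a
 * placeholder:
 *
 *	vfs_write_suspend(mp);
 *	... the filesystem is synced and no writes are in progress ...
 *	vfs_write_resume(mp);
 */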

static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct proc *p)
{
	struct uio	auio;
	struct iovec	iov;
	int	error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_procp = p;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, p);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, p);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}
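
/*
 * Example (sketch, assuming the EXTATTR_NAMESPACE_SYSTEM constant from
 * <sys/extattr.h> and an illustrative attribute name): fetching a
 * fixed-size attribute with the wrapper doing the locking (ioflg 0).
 * On success, buflen is updated to the number of bytes actually read:
 *
 *	char data[64];
 *	int buflen = sizeof(data), error;
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example", &buflen, data, p);
 */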

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct proc *p)
{
	struct uio	auio;
	struct iovec	iov;
	struct mount	*mp;
	int	error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_procp = p;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, p);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, p);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct proc *p)
{
	struct mount	*mp;
	int	error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	}

	/* authorize attribute removal as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, p);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, p);
	}

	return (error);
}