/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 147420 2005-06-16 15:43:17Z green $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <rpc/rpcclnt.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

#include <nfs4client/nfs4.h>

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
		    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
			      struct ucred *cred, int ioflag);

extern int nfs_directio_enable;
extern int nfs_directio_allow_mmap;
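
/*
 * nfs_directio_enable and nfs_directio_allow_mmap are tunables (defined
 * elsewhere in the client code, presumably next to their sysctls) that
 * gate O_DIRECT I/O and mmap access to O_DIRECT files, respectively.
 */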
/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if (!nfs_directio_allow_mmap && (np->n_flag & NNONCACHE) &&
	    (vp->v_type == VREG)) {
		printf("nfs_getpages: called on non-cacheable vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		/* We'll never get here for v4, because we always have fsinfo */
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

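	/*
	 * btoc() rounds the byte count up to whole pages, e.g. with 4K
	 * pages a count of 5000 yields npages == 2.
	 */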
	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	{
		vm_page_t m = pages[ap->a_reqpage];

		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		if (m->valid != 0) {
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
			for (i = 0; i < npages; ++i) {
				if (i != ap->a_reqpage)
					vm_page_free(pages[i]);
			}
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			return (0);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

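	/*
	 * Describe the mapped region with a single-entry iovec/uio so
	 * one read RPC can fill all of the requested pages.
	 */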
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
}

/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	if (!nfs_directio_allow_mmap && (np->n_flag & NNONCACHE) &&
	    (vp->v_type == VREG))
		printf("nfs_putpages: called on non-cacheable vnode??\n");

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += count;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

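	/*
	 * Unless the pager demands a synchronous put, issue the write as
	 * UNSTABLE and let a later commit RPC make it durable.
	 */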
	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
	    iomode = NFSV3WRITE_UNSTABLE;
	else
	    iomode = NFSV3WRITE_FILESYNC;

	error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

	if (!error) {
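		/*
		 * round_page() means a partially-written trailing page
		 * still counts as written here and gets VM_PAGER_OK.
		 */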
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit) {
			nfs_clearcommit(vp->v_mount);
		}
	}
	return rtvals[0];
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	if (nfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching / no readaheads.  Just read data into the user buffer. */
		return nfs_readrpc(vp, uio, cred);

	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
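	/*
	 * The sequential-access score arrives encoded in the high bits
	 * of ioflag (see IO_SEQSHIFT); it appears to be rescaled here
	 * from BKVASIZE units into buffers of this mount's biosize.
	 */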
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date.  If you need current
	 * attributes, this can be forced by setting n_attrstamp to 0
	 * before the VOP_GETATTR() call.
	 */
	if (np->n_flag & NMODIFIED) {
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			if (vp->v_type == VDIR)
				(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
	}
	do {
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
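		/*
		 * Example: with biosize 8192 and uio_offset 20000, lbn is
		 * 2 and on is 3616 (20000 - 2 * 8192); the mask works as
		 * a remainder because biosize is a power of 2.
		 */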

		/*
		 * Start the read ahead(s), as required.
		 * The readahead is kicked off only if sequential access
		 * is detected, based on the readahead hint (ra_expect_lbn).
		 */
		if (nmp->nm_readahead > 0 && np->ra_expect_lbn == lbn) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = nfs_sigintr(nmp, NULL, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		    np->ra_expect_lbn = lbn + 1;
		}

		/*
		 * Obtain the buffer cache block.  Figure out the buffer size
		 * when we are at EOF.  If we are modifying the size of the
		 * buffer based on an EOF condition we need to hold
		 * nfs_rslock() through obtaining the buffer to prevent
		 * a potential writer-appender from messing with n_size.
		 * Otherwise we may accidentally truncate the buffer and
		 * lose dirty data.
		 *
		 * Note that bcount is *not* DEV_BSIZE aligned.
		 */

again:
		bcount = biosize;
		if ((off_t)lbn * biosize >= np->n_size) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
			bcount = np->n_size - (off_t)lbn * biosize;
		}
		if (bcount != biosize) {
			switch(nfs_rslock(np, td)) {
			case ENOLCK:
				goto again;
				/* not reached */
			case EIO:
				return (EIO);
			case EINTR:
			case ERESTART:
				return (EINTR);
				/* not reached */
			default:
				break;
			}
		}

		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (bcount != biosize)
			nfs_rsunlock(np, td);
		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = nfs_sigintr(nmp, NULL, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
		    return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = nfs_sigintr(nmp, NULL, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(vp, bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server.  The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = nfs_sigintr(nmp, NULL, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(vp, bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf("nfs_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    }

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1. So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed: the first from the user
 * buffer to a staging buffer, and then a second from the staging
 * buffer to mbufs.  This could be optimized by copying from the user
 * buffer directly into mbufs and passing the chain down, but that
 * requires a fair amount of re-working of the relevant codepaths (and
 * can be done later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;

	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, nmp->nm_wsize);
			size = min(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSV3WRITE_FILESYNC;
			error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred,
						      &iomode, &must_commit);
			KASSERT((must_commit == 0),
				("nfs_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, nmp->nm_wsize);
			size = min(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&nfs_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			vhold(vp);
			error = nfs_asyncio(nmp, bp, NOCRED, td);
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				vdrop(bp->b_vp);
				bp->b_vp = NULL;
				relpbuf(bp, &nfs_pbuf_freecnt);
				if (error == EINTR)
					return (error);
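				/*
				 * Async submission failed for a reason other
				 * than a signal; fall back to writing the
				 * remainder synchronously.
				 */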
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	int haverslock = 0;
	struct proc *p = td ? td->td_proc : NULL;

	GIANT_REQUIRED;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (nfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch(nfs_rslock(np, td)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EIO:
			return (EIO);
		case EINTR:
		case ERESTART:
			return (EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p != NULL) {
		PROC_LOCK(p);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(p, RLIMIT_FSIZE)) {
			psignal(p, SIGXFSZ);
			PROC_UNLOCK(p);
			if (haverslock)
				nfs_rsunlock(np, td);
			return (EFBIG);
		}
		PROC_UNLOCK(p);
	}

964
965	biosize = vp->v_mount->mnt_stat.f_iosize;
966	/*
967	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
968	 * would exceed the local maximum per-file write commit size when
969	 * combined with those, we must decide whether to flush,
970	 * go synchronous, or return error.  We don't bother checking
971	 * IO_UNIT -- we just make all writes atomic anyway, as there's
972	 * no point optimizing for something that really won't ever happen.
973	 */
974	if (!(ioflag & IO_SYNC)) {
975		int needrestart = 0;
976		if (nmp->nm_wcommitsize < uio->uio_resid) {
977			/*
978			 * If this request could not possibly be completed
979			 * without exceeding the maximum outstanding write
980			 * commit size, see if we can convert it into a
981			 * synchronous write operation.
982			 */
983			if (ioflag & IO_NDELAY)
984				return (EAGAIN);
985			ioflag |= IO_SYNC;
986			if (np->n_flag & NMODIFIED)
987				needrestart = 1;
988		} else if (np->n_flag & NMODIFIED) {
989			int wouldcommit = 0;
990			BO_LOCK(&vp->v_bufobj);
991			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
992				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
993				    b_bobufs) {
994					if (bp->b_flags & B_NEEDCOMMIT)
995						wouldcommit += bp->b_bcount;
996				}
997			}
998			BO_UNLOCK(&vp->v_bufobj);
999			/*
1000			 * Since we're not operating synchronously and
1001			 * bypassing the buffer cache, we are in a commit
1002			 * and holding all of these buffers whether
1003			 * transmitted or not.  If not limited, this
1004			 * will lead to the buffer cache deadlocking,
1005			 * as no one else can flush our uncommitted buffers.
1006			 */
1007			wouldcommit += uio->uio_resid;
1008			/*
1009			 * If we would initially exceed the maximum
1010			 * outstanding write commit size, flush and restart.
1011			 */
1012			if (wouldcommit > nmp->nm_wcommitsize)
1013				needrestart = 1;
1014		}
1015		if (needrestart) {
1016			if (haverslock) {
1017				nfs_rsunlock(np, td);
1018				haverslock = 0;
1019			}
1020			goto flush_and_restart;
1021		}
1022	}
1023
1024	do {
1025		nfsstats.biocache_writes++;
1026		lbn = uio->uio_offset / biosize;
1027		on = uio->uio_offset & (biosize-1);
1028		n = min((unsigned)(biosize - on), uio->uio_resid);
1029again:
1030		/*
1031		 * Handle direct append and file extension cases, calculate
1032		 * unaligned buffer size.
1033		 */
1034
1035		if (uio->uio_offset == np->n_size && n) {
1036			/*
1037			 * Get the buffer (in its pre-append state to maintain
1038			 * B_CACHE if it was previously set).  Resize the
1039			 * nfsnode after we have locked the buffer to prevent
1040			 * readers from reading garbage.
1041			 */
1042			bcount = on;
1043			bp = nfs_getcacheblk(vp, lbn, bcount, td);
1044
1045			if (bp != NULL) {
1046				long save;
1047
1048				np->n_size = uio->uio_offset + n;
1049				np->n_flag |= NMODIFIED;
1050				vnode_pager_setsize(vp, np->n_size);
1051
1052				save = bp->b_flags & B_CACHE;
1053				bcount += n;
1054				allocbuf(bp, bcount);
1055				bp->b_flags |= save;
1056			}
1057		} else {
1058			/*
1059			 * Obtain the locked cache block first, and then
1060			 * adjust the file's size as appropriate.
1061			 */
1062			bcount = on + n;
1063			if ((off_t)lbn * biosize + bcount < np->n_size) {
1064				if ((off_t)(lbn + 1) * biosize < np->n_size)
1065					bcount = biosize;
1066				else
1067					bcount = np->n_size - (off_t)lbn * biosize;
1068			}
1069			bp = nfs_getcacheblk(vp, lbn, bcount, td);
1070			if (uio->uio_offset + n > np->n_size) {
1071				np->n_size = uio->uio_offset + n;
1072				np->n_flag |= NMODIFIED;
1073				vnode_pager_setsize(vp, np->n_size);
1074			}
1075		}
1076
1077		if (!bp) {
1078			error = nfs_sigintr(nmp, NULL, td);
1079			if (!error)
1080				error = EINTR;
1081			break;
1082		}
1083
1084		/*
1085		 * Issue a READ if B_CACHE is not set.  In special-append
1086		 * mode, B_CACHE is based on the buffer prior to the write
1087		 * op and is typically set, avoiding the read.  If a read
1088		 * is required in special append mode, the server will
1089		 * probably send us a short-read since we extended the file
1090		 * on our end, resulting in b_resid == 0 and, thusly,
1091		 * B_CACHE getting set.
1092		 *
1093		 * We can also avoid issuing the read if the write covers
1094		 * the entire buffer.  We have to make sure the buffer state
1095		 * is reasonable in this case since we will not be initiating
1096		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
1097		 * more information.
1098		 *
1099		 * B_CACHE may also be set due to the buffer being cached
1100		 * normally.
1101		 */
1102
		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(vp, bp, cred, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		np->n_flag |= NMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole ), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) (nmp->nm_rpcops->nr_writebp)(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (haverslock)
		nfs_rsunlock(np, td);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		nfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		nfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
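		/*
		 * Translate the logical block number into DEV_BSIZE units,
		 * e.g. biosize 8192 with DEV_BSIZE 512 gives 16 device
		 * blocks per NFS block.
		 */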
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");

	/*
	 * XXX This check stops us from needlessly doing a vinvalbuf when
	 * being called through vclean().  It is not clear that this is
	 * unsafe.
	 */
	if (vp->v_iflag & VI_DOOMED)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
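	/*
	 * On interruptible mounts, sleep with PCATCH and a bounded
	 * timeout so that a pending signal can break the wait.
	 */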
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	if ((old_lock = VOP_ISLOCKED(vp, td)) != LK_EXCLUSIVE) {
		if (old_lock == LK_SHARED) {
			/* Upgrade to exclusive lock, this might block */
			vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
		} else {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}

	/*
	 * Now, flush as required.
	 */
	error = vinvalbuf(vp, flags, td, slpflag, 0);
	while (error) {
		if (intrflg && (error = nfs_sigintr(nmp, NULL, td)))
			goto out;
		error = vinvalbuf(vp, flags, td, 0, slptimeo);
	}
	np->n_flag &= ~NMODIFIED;
out:
	if (old_lock != LK_EXCLUSIVE) {
		if (old_lock == LK_SHARED) {
			/* Downgrade from exclusive lock, this might block */
			vn_lock(vp, LK_DOWNGRADE, td);
		} else {
			VOP_UNLOCK(vp, 0, td);
		}
	}
	return (error);
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return (EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < nfs_numasync; iod++)
		if (nfs_iodwant[iod]) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod) {
		iod = nfs_nfsiodnew();
		if (iod != -1)
			gotiod = TRUE;
	}

	if (gotiod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		nfs_iodwant[iod] = NULL;
		nfs_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&nfs_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = nfs_tsleep(td, &nmp->nm_bufq, slpflag | PRIBIO,
					   "nfsaio", slptimeo);
			if (error) {
				error2 = nfs_sigintr(nmp, NULL, td);
				if (error2)
					return (error2);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
					("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

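/*
 * Completion side of the ASYNC direct-I/O write path: called from an
 * nfsiod to perform the FILESYNC write RPC described by b_caller1, and
 * to release the staging uio/iovec and pbuf set up by
 * nfs_directio_write().
 */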
void
nfs_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;
	struct nfsmount *nmp = VFSTONFS(bp->b_vp->v_mount);

	iomode = NFSV3WRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	(nmp->nm_rpcops->nr_writerpc)(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
	KASSERT((must_commit == 0), ("nfs_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	vdrop(bp->b_vp);
	bp->b_vp = NULL;
	relpbuf(bp, &nfs_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;

	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		nfsstats.read_bios++;
		error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);

		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			int left  = uiop->uio_resid;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
		/* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
		if (p && (vp->v_vflag & VV_TEXT) &&
		    (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.va_mtime))) {
			PROC_LOCK(p);
			killproc(p, "text file modification");
			PROC_UNLOCK(p);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
			error = nfs4_readdirrpc(vp, uiop, cr);
		else {
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
		}
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    retv = (nmp->nm_rpcops->nr_commit)(
				vp, off, bp->b_dirtyend-bp->b_dirtyoff,
				bp->b_wcred, td);
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
		    if (retv == NFSERR_STALEWRITEVERF) {
			    nfs_clearcommit(vp->v_mount);
		    }
	    }

	    /*
	     * Set up for the actual write.
	     */

	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;

		error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file-case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused.  This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 */
		if (error == EINTR || error == EIO
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if (error && (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
	    nfs_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize = np->n_size;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	np->n_size = nsize;

	if (np->n_size < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
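		/*
		 * Locate the buffer straddling the new EOF.  Example: with
		 * biosize 8192 and nsize 20000, lbn is 2 and bufsize is
		 * 3616, the bytes of that block that remain valid.
		 */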
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return (EINTR);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}