/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
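	/*
	 * The pages other than the requested one are only read-ahead
	 * candidates; since the requested page is already (partially)
	 * valid, they are simply freed here and the pager zero-fills the
	 * invalid portion of the requested page itself.
	 */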
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);
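	/*
	 * The pages are now temporarily mapped into the pbuf's KVA, so the
	 * read RPC below can treat the whole run as a single contiguous
	 * kernel buffer described by one iovec.
	 */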

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred, we may have hit a zero-fill
			 * section.  We leave valid set to 0; the page
			 * is freed by vm_page_readahead_finish() if
			 * its index is not the requested one, or is
			 * zeroed and marked valid by
			 * vm_pager_get_pages() for the requested page.
			 */
			;
		}
		if (i != ap->a_reqpage)
			vm_page_readahead_finish(m);
	}
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

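	/*
	 * NFSWRITE_UNSTABLE lets the server reply before the data reaches
	 * stable storage, leaving the data to be committed (or rewritten)
	 * later; NFSWRITE_FILESYNC requires the server to write the data
	 * to stable storage before replying.
	 */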
	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
	    iomode = NFSWRITE_UNSTABLE;
	else
	    iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

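	/*
	 * On success (or when nfs_keep_dirty_on_error is not set) the pages
	 * that were written are marked clean.  If ncl_writerpc() reported
	 * must_commit (typically because the server's write verifier
	 * changed), the cached "written but uncommitted" state for this
	 * mount is stale and ncl_clearcommit() discards it so the affected
	 * buffers will be written again.
	 */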
	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return rtvals[0];
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching / no readaheads.  Just read data into the user buffer. */
		return ncl_readrpc(vp, uio, cred);

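	/*
	 * The upper bits of ioflag carry the sequential-access count
	 * maintained by the upper layers (see sequential_heuristic() in
	 * kern/vfs_vnops.c); scaling it by the buffer size relative to
	 * BKVASIZE turns it into the number of blocks of read-ahead to
	 * attempt below.
	 */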
	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
	    u_quad_t nsize;

	    mtx_lock(&np->n_mtx);
	    nsize = np->n_size;
	    mtx_unlock(&np->n_mtx);

	    switch (vp->v_type) {
	    case VREG:
		NFSINCRGLOBAL(newnfsstats.biocache_reads);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/* Note that bcount is *not* DEV_BSIZE aligned. */
		bcount = biosize;
		if ((off_t)lbn * biosize >= nsize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > nsize) {
			bcount = nsize - (off_t)lbn * biosize;
		}
		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = MIN((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
		    return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = newnfs_sigintr(nmp, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = ncl_doio(vp, bp, cred, td, 0);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    }

	    if (n > 0) {
		    error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1. So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed. The first is a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs. This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
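	/*
	 * Each write RPC below is limited to at most wsize bytes and never
	 * crosses an iovec boundary, so a large or multi-iovec uio is
	 * carved into a sequence of simple single-iovec requests.
	 */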
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
				("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down. But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount, noncontig_write, obcount;
	int bp_cached, n, on, error = 0, error1;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

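	/*
	 * Remember the original resid and file size so that, if an error
	 * occurs and IO_UNIT is set, the size and the uio can be rolled
	 * back at the end and the write appears not to have happened at
	 * all.
	 */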
	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		int needrestart = 0;
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NHASBEENLOCKED) == 0 &&
		    (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
			noncontig_write = 1;
		else
			noncontig_write = 0;
		if ((uio->uio_offset == np->n_size ||
		    (noncontig_write != 0 &&
		    lbn == (np->n_size / biosize) &&
		    uio->uio_offset + n > np->n_size)) && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			obcount = np->n_size - (lbn * biosize);
			bp = nfs_getcacheblk(vp, lbn, obcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount = on + n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
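				/*
				 * For a non-contiguous append the bytes
				 * between the old end of the block (obcount)
				 * and the start of this write (on) contain
				 * stale data, so zero them rather than
				 * exposing them to readers.
				 */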
				if (noncontig_write != 0 && on > obcount)
					vfs_bio_bzero_buf(bp, obcount, on -
					    obcount);
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
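		/*
		 * Cache a reference to the write credential on the buffer
		 * so that write-back performed later (e.g. by an nfsiod or
		 * the buf daemon) can be done with the credentials of the
		 * original writer.
		 */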
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * If there has been a file lock applied to this file
		 * or vfs.nfs.old_noncontig_writing is set, do the following:
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * If vfs.nfs.old_noncontig_writing is not set and there has
		 * not been file locking done on this file:
		 * Relax coherency a bit for the sake of performance and
		 * expand the current dirty region to contain the new
		 * write even if it means we mark some non-dirty data as
		 * dirty.
		 */

		if (noncontig_write == 0 && bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content than what
			 * was possibly written by the faulty uiomove.
			 * Release it, forgetting the data pages, to
			 * prevent the leak of uninitialized data to
			 * usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Get the partial update on the progress made from
		 * uiomove, if an error occurred.
		 */
		if (error != 0)
			n = local_resid - uio->uio_resid;

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n > 0) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
1251191783Srmacklem		if ((ioflag & IO_SYNC)) {
1252191783Srmacklem			if (ioflag & IO_INVAL)
1253191783Srmacklem				bp->b_flags |= B_NOCACHE;
1254239848Skib			error1 = bwrite(bp);
1255239848Skib			if (error1 != 0) {
1256239848Skib				if (error == 0)
1257239848Skib					error = error1;
1258191783Srmacklem				break;
1259239848Skib			}
1260191783Srmacklem		} else if ((n + on) == biosize) {
1261191783Srmacklem			bp->b_flags |= B_ASYNC;
1262191783Srmacklem			(void) ncl_writebp(bp, 0, NULL);
1263191783Srmacklem		} else {
1264191783Srmacklem			bdwrite(bp);
1265191783Srmacklem		}
1266239845Skib
1267239845Skib		if (error != 0)
1268239845Skib			break;
1269191783Srmacklem	} while (uio->uio_resid > 0 && n > 0);
1270191783Srmacklem
1271239845Skib	if (error != 0) {
1272239845Skib		if (ioflag & IO_UNIT) {
1273239845Skib			VATTR_NULL(&vattr);
1274239845Skib			vattr.va_size = orig_size;
1275239845Skib			/* IO_SYNC is handled implicitly */
1276239845Skib			(void)VOP_SETATTR(vp, &vattr, cred);
1277239845Skib			uio->uio_offset -= orig_resid - uio->uio_resid;
1278239845Skib			uio->uio_resid = orig_resid;
1279239845Skib		}
1280239845Skib	}
1281239845Skib
1282191783Srmacklem	return (error);
1283191783Srmacklem}
1284191783Srmacklem
1285191783Srmacklem/*
1286191783Srmacklem * Get an nfs cache block.
1287191783Srmacklem *
1288191783Srmacklem * Allocate a new one if the block isn't currently in the cache
1289191783Srmacklem * and return the block marked busy. If the calling process is
1290191783Srmacklem * interrupted by a signal for an interruptible mount point, return
1291191783Srmacklem * NULL.
1292191783Srmacklem *
1293191783Srmacklem * The caller must carefully deal with the possible B_INVAL state of
1294191783Srmacklem * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1295191783Srmacklem * indirectly), so synchronous reads can be issued without worrying about
1296191783Srmacklem * the B_INVAL state.  We have to be a little more careful when dealing
1297191783Srmacklem * with writes (see comments in nfs_write()) when extending a file past
1298191783Srmacklem * its EOF.
1299191783Srmacklem */
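/*
 * Typical caller pattern (editorial sketch, mirroring the usage later in
 * this file; "lbn" and "bcount" are the caller's block number and size):
 *
 *	bp = nfs_getcacheblk(vp, lbn, bcount, td);
 *	if (bp == NULL)
 *		return (EINTR);
 */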
1300191783Srmacklemstatic struct buf *
1301191783Srmacklemnfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1302191783Srmacklem{
1303191783Srmacklem	struct buf *bp;
1304191783Srmacklem	struct mount *mp;
1305191783Srmacklem	struct nfsmount *nmp;
1306191783Srmacklem
1307191783Srmacklem	mp = vp->v_mount;
1308191783Srmacklem	nmp = VFSTONFS(mp);
1309191783Srmacklem
1310191783Srmacklem	if (nmp->nm_flag & NFSMNT_INT) {
1311249077Skib		sigset_t oldset;
1312191783Srmacklem
1313249077Skib		newnfs_set_sigmask(td, &oldset);
1314195821Srmacklem		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
1315249077Skib		newnfs_restore_sigmask(td, &oldset);
1316191783Srmacklem		while (bp == NULL) {
1317191783Srmacklem			if (newnfs_sigintr(nmp, td))
1318191783Srmacklem				return (NULL);
1319191783Srmacklem			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1320191783Srmacklem		}
1321191783Srmacklem	} else {
1322191783Srmacklem		bp = getblk(vp, bn, size, 0, 0, 0);
1323191783Srmacklem	}
1324191783Srmacklem
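	/*
	 * For regular files, express b_blkno in DEV_BSIZE (512 byte)
	 * units; e.g. with a 32K bo_bsize the scaling factor is 64.
	 */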
1325231330Srmacklem	if (vp->v_type == VREG)
1326231330Srmacklem		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
1327191783Srmacklem	return (bp);
1328191783Srmacklem}
1329191783Srmacklem
1330191783Srmacklem/*
1331191783Srmacklem * Flush and invalidate all dirty buffers. If another process is already
1332191783Srmacklem * doing the flush, just wait for completion.
1333191783Srmacklem */
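/*
 * Typical call (editorial sketch; the flags and intrflg value vary by
 * caller): flush dirty data before invalidating, allowing interruption
 * on interruptible mounts:
 *
 *	error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
 */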
1334191783Srmacklemint
1335191783Srmacklemncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1336191783Srmacklem{
1337191783Srmacklem	struct nfsnode *np = VTONFS(vp);
1338191783Srmacklem	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1339191783Srmacklem	int error = 0, slpflag, slptimeo;
1340249077Skib	int old_lock = 0;
1341191783Srmacklem
1342191783Srmacklem	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1343191783Srmacklem
1344191783Srmacklem	if ((nmp->nm_flag & NFSMNT_INT) == 0)
1345191783Srmacklem		intrflg = 0;
1346191783Srmacklem	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1347191783Srmacklem		intrflg = 1;
1348191783Srmacklem	if (intrflg) {
1349195821Srmacklem		slpflag = NFS_PCATCH;
1350191783Srmacklem		slptimeo = 2 * hz;
1351191783Srmacklem	} else {
1352191783Srmacklem		slpflag = 0;
1353191783Srmacklem		slptimeo = 0;
1354191783Srmacklem	}
1355191783Srmacklem
1356191783Srmacklem	old_lock = ncl_upgrade_vnlock(vp);
1357193955Srmacklem	if (vp->v_iflag & VI_DOOMED) {
1358193955Srmacklem		/*
1359193955Srmacklem		 * Since vgonel() uses the generic vinvalbuf() to flush
1360193955Srmacklem		 * dirty buffers and it does not call this function, it
1361193955Srmacklem		 * is safe to just return OK when VI_DOOMED is set.
1362193955Srmacklem		 */
1363193955Srmacklem		ncl_downgrade_vnlock(vp, old_lock);
1364193955Srmacklem		return (0);
1365193955Srmacklem	}
1366193955Srmacklem
1367191783Srmacklem	/*
1368191783Srmacklem	 * Now, flush as required.
1369191783Srmacklem	 */
1370191783Srmacklem	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1371191783Srmacklem		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1372191783Srmacklem		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1373191783Srmacklem		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1374191783Srmacklem		/*
1375191783Srmacklem		 * If the page clean was interrupted, fail the invalidation.
1376249077Skib		 * Otherwise we run the risk of losing dirty pages in the
1377191783Srmacklem		 * vinvalbuf() call below.
1378191783Srmacklem		 */
1379191783Srmacklem		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1380191783Srmacklem			goto out;
1381191783Srmacklem	}
1382191783Srmacklem
1383191783Srmacklem	error = vinvalbuf(vp, flags, slpflag, 0);
1384191783Srmacklem	while (error) {
1385191783Srmacklem		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1386191783Srmacklem			goto out;
1387191783Srmacklem		error = vinvalbuf(vp, flags, 0, slptimeo);
1388191783Srmacklem	}
1389191783Srmacklem	mtx_lock(&np->n_mtx);
1390191783Srmacklem	if (np->n_directio_asyncwr == 0)
1391191783Srmacklem		np->n_flag &= ~NMODIFIED;
1392191783Srmacklem	mtx_unlock(&np->n_mtx);
1393191783Srmacklemout:
1394191783Srmacklem	ncl_downgrade_vnlock(vp, old_lock);
1395191783Srmacklem	return (error);
1396191783Srmacklem}
1397191783Srmacklem
1398191783Srmacklem/*
1399191783Srmacklem * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1400191783Srmacklem * This is mainly to avoid queueing async I/O requests when the nfsiods
1401191783Srmacklem * are all hung on a dead server.
1402191783Srmacklem *
1403191783Srmacklem * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1404191783Srmacklem * is eventually dequeued by the async daemon, ncl_doio() *will*.
1405191783Srmacklem */
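/*
 * Rough caller pattern (editorial sketch, not lifted from the sources):
 * an asynchronous caller falls back to synchronous I/O when no nfsiod
 * can take the buffer:
 *
 *	if (ncl_asyncio(nmp, bp, cred, td) != 0)
 *		(void) ncl_doio(bp->b_vp, bp, cred, td, 1);
 */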
1406191783Srmacklemint
1407191783Srmacklemncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1408191783Srmacklem{
1409191783Srmacklem	int iod;
1410191783Srmacklem	int gotiod;
1411191783Srmacklem	int slpflag = 0;
1412191783Srmacklem	int slptimeo = 0;
1413191783Srmacklem	int error, error2;
1414191783Srmacklem
1415191783Srmacklem	/*
1416191783Srmacklem	 * Commits are usually short and sweet, so let's save some CPU and
1417191783Srmacklem	 * leave the async daemons for more important rpc's (such as reads
1418191783Srmacklem	 * and writes).
1419250257Srmacklem	 *
1420250257Srmacklem	 * Readdirplus RPCs do vget()s to acquire the vnodes for entries
1421250257Srmacklem	 * in the directory in order to update attributes. This can deadlock
1422250257Srmacklem	 * with another thread that is waiting for async I/O to be done by
1423250257Srmacklem	 * an nfsiod thread while holding a lock on one of these vnodes.
1424250257Srmacklem	 * To avoid this deadlock, don't allow the async nfsiod threads to
1425250257Srmacklem	 * perform Readdirplus RPCs.
1426191783Srmacklem	 */
1427191783Srmacklem	mtx_lock(&ncl_iod_mutex);
1428250257Srmacklem	if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1429250257Srmacklem	     (nmp->nm_bufqiods > ncl_numasync / 2)) ||
1430250257Srmacklem	    (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
1431191783Srmacklem		mtx_unlock(&ncl_iod_mutex);
1432191783Srmacklem		return (EIO);
1433191783Srmacklem	}
1434191783Srmacklemagain:
1435191783Srmacklem	if (nmp->nm_flag & NFSMNT_INT)
1436195821Srmacklem		slpflag = NFS_PCATCH;
1437191783Srmacklem	gotiod = FALSE;
1438191783Srmacklem
1439191783Srmacklem	/*
1440191783Srmacklem	 * Find a free iod to process this request.
1441191783Srmacklem	 */
1442191783Srmacklem	for (iod = 0; iod < ncl_numasync; iod++)
1443203119Srmacklem		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
1444191783Srmacklem			gotiod = TRUE;
1445191783Srmacklem			break;
1446191783Srmacklem		}
1447191783Srmacklem
1448191783Srmacklem	/*
1449191783Srmacklem	 * Try to create one if none are free.
1450191783Srmacklem	 */
1451220683Srmacklem	if (!gotiod)
1452220683Srmacklem		ncl_nfsiodnew();
1453220683Srmacklem	else {
1454191783Srmacklem		/*
1455191783Srmacklem		 * Found one, so wake it up and tell it which
1456191783Srmacklem		 * mount to process.
1457191783Srmacklem		 */
1458191783Srmacklem		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1459191783Srmacklem		    iod, nmp));
1460203119Srmacklem		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
1461191783Srmacklem		ncl_iodmount[iod] = nmp;
1462191783Srmacklem		nmp->nm_bufqiods++;
1463191783Srmacklem		wakeup(&ncl_iodwant[iod]);
1464191783Srmacklem	}
1465191783Srmacklem
1466191783Srmacklem	/*
1467191783Srmacklem	 * If none are free, we may already have an iod working on this mount
1468191783Srmacklem	 * point.  If so, it will process our request.
1469191783Srmacklem	 */
1470191783Srmacklem	if (!gotiod) {
1471191783Srmacklem		if (nmp->nm_bufqiods > 0) {
1472191783Srmacklem			NFS_DPF(ASYNCIO,
1473191783Srmacklem				("ncl_asyncio: %d iods are already processing mount %p\n",
1474191783Srmacklem				 nmp->nm_bufqiods, nmp));
1475191783Srmacklem			gotiod = TRUE;
1476191783Srmacklem		}
1477191783Srmacklem	}
1478191783Srmacklem
1479191783Srmacklem	/*
1480191783Srmacklem	 * If we have an iod which can process the request, then queue
1481191783Srmacklem	 * the buffer.
1482191783Srmacklem	 */
1483191783Srmacklem	if (gotiod) {
1484191783Srmacklem		/*
1485191783Srmacklem		 * Ensure that the queue never grows too large.  We still want
1486191783Srmacklem		 * to asynchronize, so we block rather than return EIO.
1487191783Srmacklem		 */
1488191783Srmacklem		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1489191783Srmacklem			NFS_DPF(ASYNCIO,
1490191783Srmacklem				("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1491191783Srmacklem			nmp->nm_bufqwant = TRUE;
1492249077Skib			error = newnfs_msleep(td, &nmp->nm_bufq,
1493201029Srmacklem			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
1494249077Skib			   slptimeo);
1495191783Srmacklem			if (error) {
1496191783Srmacklem				error2 = newnfs_sigintr(nmp, td);
1497191783Srmacklem				if (error2) {
1498249077Skib					mtx_unlock(&ncl_iod_mutex);
1499191783Srmacklem					return (error2);
1500191783Srmacklem				}
1501195821Srmacklem				if (slpflag == NFS_PCATCH) {
1502191783Srmacklem					slpflag = 0;
1503191783Srmacklem					slptimeo = 2 * hz;
1504191783Srmacklem				}
1505191783Srmacklem			}
1506191783Srmacklem			/*
1507191783Srmacklem			 * We might have lost our iod while sleeping,
1508191783Srmacklem			 * so check and loop if necessary.
1509191783Srmacklem			 */
1510220683Srmacklem			goto again;
1511191783Srmacklem		}
1512191783Srmacklem
1513191783Srmacklem		/* We might have lost our nfsiod */
1514191783Srmacklem		if (nmp->nm_bufqiods == 0) {
1515191783Srmacklem			NFS_DPF(ASYNCIO,
1516191783Srmacklem				("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1517191783Srmacklem			goto again;
1518191783Srmacklem		}
1519191783Srmacklem
1520191783Srmacklem		if (bp->b_iocmd == BIO_READ) {
1521191783Srmacklem			if (bp->b_rcred == NOCRED && cred != NOCRED)
1522191783Srmacklem				bp->b_rcred = crhold(cred);
1523191783Srmacklem		} else {
1524191783Srmacklem			if (bp->b_wcred == NOCRED && cred != NOCRED)
1525191783Srmacklem				bp->b_wcred = crhold(cred);
1526191783Srmacklem		}
1527191783Srmacklem
1528191783Srmacklem		if (bp->b_flags & B_REMFREE)
1529191783Srmacklem			bremfreef(bp);
1530191783Srmacklem		BUF_KERNPROC(bp);
1531191783Srmacklem		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1532191783Srmacklem		nmp->nm_bufqlen++;
1533191783Srmacklem		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1534249077Skib			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1535191783Srmacklem			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1536191783Srmacklem			VTONFS(bp->b_vp)->n_directio_asyncwr++;
1537191783Srmacklem			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1538191783Srmacklem		}
1539191783Srmacklem		mtx_unlock(&ncl_iod_mutex);
1540191783Srmacklem		return (0);
1541191783Srmacklem	}
1542191783Srmacklem
1543191783Srmacklem	mtx_unlock(&ncl_iod_mutex);
1544191783Srmacklem
1545191783Srmacklem	/*
1546191783Srmacklem	 * All the iods are busy on other mounts, so return EIO to
1547191783Srmacklem	 * force the caller to process the i/o synchronously.
1548191783Srmacklem	 */
1549191783Srmacklem	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1550191783Srmacklem	return (EIO);
1551191783Srmacklem}
1552191783Srmacklem
1553191783Srmacklemvoid
1554191783Srmacklemncl_doio_directwrite(struct buf *bp)
1555191783Srmacklem{
1556191783Srmacklem	int iomode, must_commit;
1557191783Srmacklem	struct uio *uiop = (struct uio *)bp->b_caller1;
1558191783Srmacklem	char *iov_base = uiop->uio_iov->iov_base;
1559249077Skib
1560191783Srmacklem	iomode = NFSWRITE_FILESYNC;
1561191783Srmacklem	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1562207082Srmacklem	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
1563191783Srmacklem	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1564191783Srmacklem	free(iov_base, M_NFSDIRECTIO);
1565191783Srmacklem	free(uiop->uio_iov, M_NFSDIRECTIO);
1566191783Srmacklem	free(uiop, M_NFSDIRECTIO);
1567191783Srmacklem	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1568191783Srmacklem		struct nfsnode *np = VTONFS(bp->b_vp);
1569191783Srmacklem		mtx_lock(&np->n_mtx);
1570191783Srmacklem		np->n_directio_asyncwr--;
1571191783Srmacklem		if (np->n_directio_asyncwr == 0) {
1572191783Srmacklem			np->n_flag &= ~NMODIFIED;
1573191783Srmacklem			if ((np->n_flag & NFSYNCWAIT)) {
1574191783Srmacklem				np->n_flag &= ~NFSYNCWAIT;
1575191783Srmacklem				wakeup((caddr_t)&np->n_directio_asyncwr);
1576191783Srmacklem			}
1577191783Srmacklem		}
1578191783Srmacklem		mtx_unlock(&np->n_mtx);
1579191783Srmacklem	}
1580191783Srmacklem	bp->b_vp = NULL;
1581191783Srmacklem	relpbuf(bp, &ncl_pbuf_freecnt);
1582191783Srmacklem}
1583191783Srmacklem
1584191783Srmacklem/*
1585191783Srmacklem * Do an I/O operation to/from a cache block. This may be called
1586191783Srmacklem * synchronously or from an nfsiod.
1587191783Srmacklem */
1588191783Srmacklemint
1589207082Srmacklemncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
1590207082Srmacklem    int called_from_strategy)
1591191783Srmacklem{
1592191783Srmacklem	struct uio *uiop;
1593191783Srmacklem	struct nfsnode *np;
1594191783Srmacklem	struct nfsmount *nmp;
1595191783Srmacklem	int error = 0, iomode, must_commit = 0;
1596191783Srmacklem	struct uio uio;
1597191783Srmacklem	struct iovec io;
1598191783Srmacklem	struct proc *p = td ? td->td_proc : NULL;
1599191783Srmacklem	uint8_t	iocmd;
1600249077Skib
1601191783Srmacklem	np = VTONFS(vp);
1602191783Srmacklem	nmp = VFSTONFS(vp->v_mount);
1603191783Srmacklem	uiop = &uio;
1604191783Srmacklem	uiop->uio_iov = &io;
1605191783Srmacklem	uiop->uio_iovcnt = 1;
1606191783Srmacklem	uiop->uio_segflg = UIO_SYSSPACE;
1607191783Srmacklem	uiop->uio_td = td;
1608191783Srmacklem
1609191783Srmacklem	/*
1610191783Srmacklem	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
1611191783Srmacklem	 * do this here so we do not have to do it in all the code that
1612191783Srmacklem	 * calls us.
1613191783Srmacklem	 */
1614191783Srmacklem	bp->b_flags &= ~B_INVAL;
1615191783Srmacklem	bp->b_ioflags &= ~BIO_ERROR;
1616191783Srmacklem
1617191783Srmacklem	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1618191783Srmacklem	iocmd = bp->b_iocmd;
1619191783Srmacklem	if (iocmd == BIO_READ) {
1620191783Srmacklem	    io.iov_len = uiop->uio_resid = bp->b_bcount;
1621191783Srmacklem	    io.iov_base = bp->b_data;
1622191783Srmacklem	    uiop->uio_rw = UIO_READ;
1623191783Srmacklem
1624191783Srmacklem	    switch (vp->v_type) {
1625191783Srmacklem	    case VREG:
1626191783Srmacklem		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1627191783Srmacklem		NFSINCRGLOBAL(newnfsstats.read_bios);
1628191783Srmacklem		error = ncl_readrpc(vp, uiop, cr);
1629191783Srmacklem
1630191783Srmacklem		if (!error) {
1631191783Srmacklem		    if (uiop->uio_resid) {
1632191783Srmacklem			/*
1633191783Srmacklem			 * If we had a short read with no error, we must have
1634191783Srmacklem			 * hit a file hole.  We should zero-fill the remainder.
1635191783Srmacklem			 * This can also occur if the server hits the file EOF.
1636191783Srmacklem			 *
1637191783Srmacklem			 * Holes used to be able to occur due to pending
1638191783Srmacklem			 * writes, but that is not possible any longer.
1639191783Srmacklem			 */
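			/*
			 * Worked example (editorial): with b_bcount 32768
			 * and a server that returns only 10000 bytes at
			 * EOF, uio_resid is left at 22768, so nread is
			 * 10000 and the trailing 22768 bytes are zeroed.
			 */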
1640191783Srmacklem			int nread = bp->b_bcount - uiop->uio_resid;
1641233353Skib			ssize_t left = uiop->uio_resid;
1642191783Srmacklem
1643191783Srmacklem			if (left > 0)
1644191783Srmacklem				bzero((char *)bp->b_data + nread, left);
1645191783Srmacklem			uiop->uio_resid = 0;
1646191783Srmacklem		    }
1647191783Srmacklem		}
1648191783Srmacklem		/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1649191783Srmacklem		if (p && (vp->v_vflag & VV_TEXT)) {
1650191783Srmacklem			mtx_lock(&np->n_mtx);
1651191783Srmacklem			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1652191783Srmacklem				mtx_unlock(&np->n_mtx);
1653191783Srmacklem				PROC_LOCK(p);
1654191783Srmacklem				killproc(p, "text file modification");
1655191783Srmacklem				PROC_UNLOCK(p);
1656191783Srmacklem			} else
1657191783Srmacklem				mtx_unlock(&np->n_mtx);
1658191783Srmacklem		}
1659191783Srmacklem		break;
1660191783Srmacklem	    case VLNK:
1661191783Srmacklem		uiop->uio_offset = (off_t)0;
1662191783Srmacklem		NFSINCRGLOBAL(newnfsstats.readlink_bios);
1663191783Srmacklem		error = ncl_readlinkrpc(vp, uiop, cr);
1664191783Srmacklem		break;
1665191783Srmacklem	    case VDIR:
1666191783Srmacklem		NFSINCRGLOBAL(newnfsstats.readdir_bios);
1667191783Srmacklem		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1668191783Srmacklem		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1669191783Srmacklem			error = ncl_readdirplusrpc(vp, uiop, cr, td);
1670191783Srmacklem			if (error == NFSERR_NOTSUPP)
1671191783Srmacklem				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1672191783Srmacklem		}
1673191783Srmacklem		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1674191783Srmacklem			error = ncl_readdirrpc(vp, uiop, cr, td);
1675191783Srmacklem		/*
1676191783Srmacklem		 * end-of-directory sets B_INVAL but does not generate an
1677191783Srmacklem		 * error.
1678191783Srmacklem		 */
1679191783Srmacklem		if (error == 0 && uiop->uio_resid == bp->b_bcount)
1680191783Srmacklem			bp->b_flags |= B_INVAL;
1681191783Srmacklem		break;
1682191783Srmacklem	    default:
1683191783Srmacklem		ncl_printf("ncl_doio:  type %x unexpected\n", vp->v_type);
1684191783Srmacklem		break;
1685191783Srmacklem	    }
1686191783Srmacklem	    if (error) {
1687191783Srmacklem		bp->b_ioflags |= BIO_ERROR;
1688191783Srmacklem		bp->b_error = error;
1689191783Srmacklem	    }
1690191783Srmacklem	} else {
1691191783Srmacklem	    /*
1692191783Srmacklem	     * If we only need to commit, try to commit
1693191783Srmacklem	     */
1694191783Srmacklem	    if (bp->b_flags & B_NEEDCOMMIT) {
1695191783Srmacklem		    int retv;
1696191783Srmacklem		    off_t off;
1697191783Srmacklem
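		    /*
		     * Byte offset of the dirty region within the file:
		     * b_blkno is in DEV_BSIZE units, so scale it up and
		     * add the offset of the first dirty byte.
		     */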
1698191783Srmacklem		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1699191783Srmacklem		    retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1700191783Srmacklem			bp->b_wcred, td);
1701191783Srmacklem		    if (retv == 0) {
1702191783Srmacklem			    bp->b_dirtyoff = bp->b_dirtyend = 0;
1703191783Srmacklem			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1704191783Srmacklem			    bp->b_resid = 0;
1705191783Srmacklem			    bufdone(bp);
1706191783Srmacklem			    return (0);
1707191783Srmacklem		    }
1708191783Srmacklem		    if (retv == NFSERR_STALEWRITEVERF) {
1709191783Srmacklem			    ncl_clearcommit(vp->v_mount);
1710191783Srmacklem		    }
1711191783Srmacklem	    }
1712191783Srmacklem
1713191783Srmacklem	    /*
1714191783Srmacklem	     * Setup for actual write
1715191783Srmacklem	     */
1716191783Srmacklem	    mtx_lock(&np->n_mtx);
1717191783Srmacklem	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1718191783Srmacklem		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1719191783Srmacklem	    mtx_unlock(&np->n_mtx);
1720191783Srmacklem
1721191783Srmacklem	    if (bp->b_dirtyend > bp->b_dirtyoff) {
1722191783Srmacklem		io.iov_len = uiop->uio_resid = bp->b_dirtyend
1723191783Srmacklem		    - bp->b_dirtyoff;
1724191783Srmacklem		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1725191783Srmacklem		    + bp->b_dirtyoff;
1726191783Srmacklem		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1727191783Srmacklem		uiop->uio_rw = UIO_WRITE;
1728191783Srmacklem		NFSINCRGLOBAL(newnfsstats.write_bios);
1729191783Srmacklem
1730191783Srmacklem		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1731191783Srmacklem		    iomode = NFSWRITE_UNSTABLE;
1732191783Srmacklem		else
1733191783Srmacklem		    iomode = NFSWRITE_FILESYNC;
1734191783Srmacklem
1735207082Srmacklem		error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
1736207082Srmacklem		    called_from_strategy);
1737191783Srmacklem
1738191783Srmacklem		/*
1739191783Srmacklem		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1740191783Srmacklem		 * to cluster the buffers needing commit.  This will allow
1741191783Srmacklem		 * the system to submit a single commit rpc for the whole
1742191783Srmacklem		 * cluster.  We can do this even if the buffer is not 100%
1743191783Srmacklem		 * dirty (relative to the NFS blocksize), so we optimize the
1744191783Srmacklem		 * append-to-file-case.
1745191783Srmacklem		 *
1746191783Srmacklem		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1747191783Srmacklem		 * cleared because write clustering only works for commit
1748191783Srmacklem		 * rpc's, not for the data portion of the write).
1749191783Srmacklem		 */
1750191783Srmacklem
1751191783Srmacklem		if (!error && iomode == NFSWRITE_UNSTABLE) {
1752191783Srmacklem		    bp->b_flags |= B_NEEDCOMMIT;
1753191783Srmacklem		    if (bp->b_dirtyoff == 0
1754191783Srmacklem			&& bp->b_dirtyend == bp->b_bcount)
1755191783Srmacklem			bp->b_flags |= B_CLUSTEROK;
1756191783Srmacklem		} else {
1757191783Srmacklem		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1758191783Srmacklem		}
1759191783Srmacklem
1760191783Srmacklem		/*
1761191783Srmacklem		 * For an interrupted write, the buffer is still valid
1762191783Srmacklem		 * and the write hasn't been pushed to the server yet,
1763191783Srmacklem		 * so we can't set BIO_ERROR and report the interruption
1764191783Srmacklem		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1765191783Srmacklem		 * is not relevant, so the rpc attempt is essentially
1766191783Srmacklem		 * a noop.  For the case of a V3 write rpc not being
1767191783Srmacklem		 * committed to stable storage, the block is still
1768191783Srmacklem		 * dirty and requires either a commit rpc or another
1769191783Srmacklem		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1770191783Srmacklem		 * the block is reused. This is indicated by setting
1771191783Srmacklem		 * the B_DELWRI and B_NEEDCOMMIT flags.
1772191783Srmacklem		 *
1773207082Srmacklem		 * EIO is returned by ncl_writerpc() to indicate a recoverable
1774207082Srmacklem		 * write error and is handled as above, except that
1775207082Srmacklem		 * B_EINTR isn't set. One cause of this is a stale stateid
1776207082Srmacklem		 * error for the RPC that indicates recovery is required,
1777207082Srmacklem		 * when called with called_from_strategy != 0.
1778207082Srmacklem		 *
1779191783Srmacklem		 * If the buffer is marked B_PAGING, it does not reside on
1780191783Srmacklem		 * the vp's paging queues so we cannot call bdirty().  The
1781191783Srmacklem		 * bp in this case is not an NFS cache block so we should
1782191783Srmacklem		 * be safe. XXX
1783191783Srmacklem		 *
1784249077Skib		 * The logic below breaks up errors into recoverable and
1785191783Srmacklem		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1786191783Srmacklem		 * and keep the buffer around for potential write retries.
1787191783Srmacklem		 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1788249077Skib		 * and save the error in the nfsnode. This is less than ideal
1789191783Srmacklem		 * but necessary. Keeping such buffers around could potentially
1790191783Srmacklem		 * cause buffer exhaustion eventually (they can never be written
1791191783Srmacklem		 * out, so they will constantly be re-dirtied). It also causes
1792249077Skib		 * all sorts of vfs panics. For non-recoverable write errors,
1793191783Srmacklem		 * also invalidate the attrcache, so we'll be forced to go over
1794191783Srmacklem		 * the wire for this object, returning an error to user on next
1795191783Srmacklem		 * call (most of the time).
1796191783Srmacklem		 */
1797249077Skib		if (error == EINTR || error == EIO || error == ETIMEDOUT
1798191783Srmacklem		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1799191783Srmacklem			int s;
1800191783Srmacklem
1801191783Srmacklem			s = splbio();
1802191783Srmacklem			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1803191783Srmacklem			if ((bp->b_flags & B_PAGING) == 0) {
1804191783Srmacklem			    bdirty(bp);
1805191783Srmacklem			    bp->b_flags &= ~B_DONE;
1806191783Srmacklem			}
1807207082Srmacklem			if ((error == EINTR || error == ETIMEDOUT) &&
1808207082Srmacklem			    (bp->b_flags & B_ASYNC) == 0)
1809191783Srmacklem			    bp->b_flags |= B_EINTR;
1810191783Srmacklem			splx(s);
1811249077Skib		} else {
1812191783Srmacklem		    if (error) {
1813191783Srmacklem			bp->b_ioflags |= BIO_ERROR;
1814191783Srmacklem			bp->b_flags |= B_INVAL;
1815191783Srmacklem			bp->b_error = np->n_error = error;
1816191783Srmacklem			mtx_lock(&np->n_mtx);
1817191783Srmacklem			np->n_flag |= NWRITEERR;
1818191783Srmacklem			np->n_attrstamp = 0;
1819223280Srmacklem			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1820191783Srmacklem			mtx_unlock(&np->n_mtx);
1821191783Srmacklem		    }
1822191783Srmacklem		    bp->b_dirtyoff = bp->b_dirtyend = 0;
1823191783Srmacklem		}
1824191783Srmacklem	    } else {
1825191783Srmacklem		bp->b_resid = 0;
1826191783Srmacklem		bufdone(bp);
1827191783Srmacklem		return (0);
1828191783Srmacklem	    }
1829191783Srmacklem	}
1830191783Srmacklem	bp->b_resid = uiop->uio_resid;
1831191783Srmacklem	if (must_commit)
1832191783Srmacklem	    ncl_clearcommit(vp->v_mount);
1833191783Srmacklem	bufdone(bp);
1834191783Srmacklem	return (error);
1835191783Srmacklem}
1836191783Srmacklem
1837191783Srmacklem/*
1838191783Srmacklem * Used to aid in handling ftruncate() operations on the NFS client side.
1839191783Srmacklem * Truncation creates a number of special problems for NFS.  We have to
1840191783Srmacklem * throw away VM pages and buffer cache buffers that are beyond EOF, and
1841191783Srmacklem * we have to properly handle VM pages or (potentially dirty) buffers
1842191783Srmacklem * that straddle the truncation point.
1843191783Srmacklem */
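/*
 * Worked example (editorial): with a 32K biosize, truncating a file to
 * nsize = 40000 gives lbn = 40000 / 32768 = 1 and bufsize = 40000 & 32767
 * = 7232, so the buffer at lbn 1 straddles the new EOF and has its dirty
 * range clipped to the first 7232 bytes below.
 */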
1844191783Srmacklem
1845191783Srmacklemint
1846191783Srmacklemncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1847191783Srmacklem{
1848191783Srmacklem	struct nfsnode *np = VTONFS(vp);
1849191783Srmacklem	u_quad_t tsize;
1850231330Srmacklem	int biosize = vp->v_bufobj.bo_bsize;
1851191783Srmacklem	int error = 0;
1852191783Srmacklem
1853191783Srmacklem	mtx_lock(&np->n_mtx);
1854191783Srmacklem	tsize = np->n_size;
1855191783Srmacklem	np->n_size = nsize;
1856191783Srmacklem	mtx_unlock(&np->n_mtx);
1857191783Srmacklem
1858191783Srmacklem	if (nsize < tsize) {
1859191783Srmacklem		struct buf *bp;
1860191783Srmacklem		daddr_t lbn;
1861191783Srmacklem		int bufsize;
1862191783Srmacklem
1863191783Srmacklem		/*
1864249077Skib		 * vtruncbuf() doesn't get the buffer overlapping the
1865191783Srmacklem		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
1866191783Srmacklem		 * buffer that now needs to be truncated.
1867191783Srmacklem		 */
1868191783Srmacklem		error = vtruncbuf(vp, cred, td, nsize, biosize);
1869191783Srmacklem		lbn = nsize / biosize;
1870191783Srmacklem		bufsize = nsize & (biosize - 1);
1871191783Srmacklem		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1872249077Skib		if (!bp)
1873249077Skib			return (EINTR);
1874191783Srmacklem		if (bp->b_dirtyoff > bp->b_bcount)
1875191783Srmacklem			bp->b_dirtyoff = bp->b_bcount;
1876191783Srmacklem		if (bp->b_dirtyend > bp->b_bcount)
1877191783Srmacklem			bp->b_dirtyend = bp->b_bcount;
1878191783Srmacklem		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
1879191783Srmacklem		brelse(bp);
1880191783Srmacklem	} else {
1881191783Srmacklem		vnode_pager_setsize(vp, nsize);
1882191783Srmacklem	}
1883191783Srmacklem	return (error);
1884191783Srmacklem}
1885191783Srmacklem
1886