/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 126853 2004-03-11 18:02:36Z phk $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <rpc/rpcclnt.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

#include <nfs4client/nfs4.h>

/*
 * Just call nfs_writebp() with the force argument set to 1.
 *
 * NOTE: B_DONE may or may not be set in a_bp on call.
 */
static int
nfs4_bwrite(struct buf *bp)
{

	return (nfs4_writebp(bp, 1, curthread));
}

static int
nfs_bwrite(struct buf *bp)
{

	return (nfs_writebp(bp, 1, curthread));
}

struct buf_ops buf_ops_nfs4 = {
	"buf_ops_nfs4",
	nfs4_bwrite
};

struct buf_ops buf_ops_nfs = {
	"buf_ops_nfs",
	nfs_bwrite
};

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
		    struct thread *td);
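/*
 * A rough sketch of how the structures above get used: nfs_write() stamps
 * each dirtied cache buffer with b_magic = B_MAGIC_NFS and points b_op at
 * one of these buf_ops, so a generic bwrite(bp) issued by the buffer cache
 * dispatches approximately as
 *
 *	bwrite(bp) -> (*bp->b_op->bop_write)(bp) -> nfs_bwrite(bp)
 *		   -> nfs_writebp(bp, 1, curthread)
 *
 * giving NFS control over every write-out of its blocks.  (Sketch only;
 * the actual dispatch lives in the buffer cache code, not in this file.)
 */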
/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		/* We'll never get here for v4, because we always have fsinfo */
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	{
		vm_page_t m = pages[ap->a_reqpage];

		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		if (m->valid != 0) {
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
			for (i = 0; i < npages; ++i) {
				if (i != ap->a_reqpage)
					vm_page_free(pages[i]);
			}
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			return(0);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = (nmp->nm_rpcops->nr_readrpc)(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return 0;
}
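/*
 * A worked example of the validation loop above: a 4-page (16K) request
 * that comes back 7384 bytes short leaves size = 9000, so pages 0-1 become
 * fully valid, page 2 is marked valid/clean for only its first 808 bytes
 * (9000 - 8192), and page 3 is left with valid == 0 for vm_fault to
 * zero-fill.  (Numbers assume PAGE_SIZE == 4096; illustrative only.)
 */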
/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += count;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	error = (nmp->nm_rpcops->nr_writerpc)(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit) {
			nfs_clearcommit(vp->v_mount);
		}
	}
	return rtvals[0];
}
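/*
 * For example: a pageout flagged VM_PAGER_PUT_SYNC (e.g. from a
 * synchronous msync(2)) goes out NFSV3WRITE_FILESYNC, so the server must
 * reach stable storage before replying; an ordinary asynchronous pageout
 * goes out NFSV3WRITE_UNSTABLE and relies on a later commit rpc, which is
 * why must_commit can come back set above.  (The msync() path is an
 * assumption about the caller, not something this file enforces.)
 */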
/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp = 0, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need
	 * current attributes this could be forced by setting n_attrstamp
	 * to 0 before the VOP_GETATTR() call.
	 */
	if (np->n_flag & NMODIFIED) {
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		if (np->n_mtime != vattr.va_mtime.tv_sec) {
			if (vp->v_type == VDIR)
				(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		}
	}
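	/*
	 * For example: if another client rewrites the file, the next
	 * VOP_GETATTR() here (at most NFS_ATTRTIMEO seconds stale) sees a
	 * new va_mtime and the block above throws the whole cached range
	 * away before the read proceeds; likewise our own dirty data
	 * (NMODIFIED) forces fresh attributes first.  This is the
	 * approximate consistency the comment above describes.
	 */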
	do {
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(vp, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/*
		 * Obtain the buffer cache block.  Figure out the buffer size
		 * when we are at EOF.  If we are modifying the size of the
		 * buffer based on an EOF condition we need to hold
		 * nfs_rslock() through obtaining the buffer to prevent
		 * a potential writer-appender from messing with n_size.
		 * Otherwise we may accidentally truncate the buffer and
		 * lose dirty data.
		 *
		 * Note that bcount is *not* DEV_BSIZE aligned.
		 */

again:
		bcount = biosize;
		if ((off_t)lbn * biosize >= np->n_size) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
			bcount = np->n_size - (off_t)lbn * biosize;
		}
		if (bcount != biosize) {
			switch(nfs_rslock(np, td)) {
			case ENOLCK:
				goto again;
				/* not reached */
			case EINTR:
			case ERESTART:
				return(EINTR);
				/* not reached */
			default:
				break;
			}
		}

		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (bcount != biosize)
			nfs_rsunlock(np, td);
		if (!bp)
			return (EINTR);

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
			return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			printf("got bad cookie vp %p bp %p\n", vp, bp);
			(nmp->nm_rpcops->nr_invaldir)(vp);
			error = nfs_vinvalbuf(vp, 0, cred, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server.  The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp)
				return (EINTR);
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(vp, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		break;
	    };

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
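/*
 * An example of the block arithmetic used in the VREG case above (and in
 * nfs_write() below): with an 8K f_iosize, uio_offset 100000 maps to
 * lbn 12 (100000 / 8192) and on 1696 (100000 & 8191), so at most
 * 8192 - 1696 = 6496 bytes are copied out of that block per pass of the
 * loop.  Illustrative numbers only; biosize comes from the mount.
 */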
/*
 * Vnode op for write using bio
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	int haverslock = 0;
	struct proc *p = td ? td->td_proc : NULL;

	GIANT_REQUIRED;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch(nfs_rslock(np, td)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EINTR:
		case ERESTART:
			return(EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p != NULL) {
		PROC_LOCK(p);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(p, RLIMIT_FSIZE)) {
			psignal(p, SIGXFSZ);
			PROC_UNLOCK(p);
			if (haverslock)
				nfs_rsunlock(np, td);
			return (EFBIG);
		}
		PROC_UNLOCK(p);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;

	do {
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */

		if (uio->uio_offset == np->n_size && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				bp->b_magic = B_MAGIC_NFS;
				if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
					bp->b_op = &buf_ops_nfs4;
				else
					bp->b_op = &buf_ops_nfs;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
		}

		if (!bp) {
			error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thus,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(bp, cred, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (!bp) {
			error = EINTR;
			break;
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		np->n_flag |= NMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) (nmp->nm_rpcops->nr_writebp)(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (haverslock)
		nfs_rsunlock(np, td);

	return (error);
}
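/*
 * Dirty-region bookkeeping example for the loop above: a buffer with
 * b_dirtyoff/b_dirtyend = [0, 512) that takes a new write at on = 512,
 * n = 512 is contiguous, so the range simply widens to [0, 1024).  A
 * later write at on = 4096 would be discontiguous (on > b_dirtyend), so
 * the old dirty range is pushed out with bwrite() first and the loop
 * retries via the again: label.  Illustrative offsets only.
 */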
/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}
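/*
 * Example: on a mount with an 8K f_iosize, logical block bn = 3 of a VREG
 * vnode gets b_blkno = 3 * (8192 / DEV_BSIZE) = 48, keeping the buffer's
 * block address in 512-byte units as the rest of the buffer cache expects.
 * On interruptible ("intr") mounts the getblk() sleep is made catchable,
 * and a caught signal surfaces as the NULL return the callers test for.
 */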
/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred,
    struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	ASSERT_VOP_LOCKED(vp, "nfs_vinvalbuf");

	/*
	 * XXX This check stops us from needlessly doing a vinvalbuf when
	 * being called through vclean().  It is not clear that this is
	 * unsafe.
	 */
	if (vp->v_iflag & VI_XLOCK)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg &&
		    nfs_sigintr(nmp, NULL, td))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
	while (error) {
		if (intrflg &&
		    nfs_sigintr(nmp, NULL, td)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, td, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (0);
}
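/*
 * The NFLUSHINPROG/NFLUSHWANT pair above is a hand-rolled sleep lock: a
 * second flusher sets NFLUSHWANT and tsleep()s on np->n_flag, and whoever
 * owns the flush wakes it on the way out.  E.g. two processes calling
 * nfs_vinvalbuf() concurrently serialize here instead of both running
 * vinvalbuf() on the same vnode.
 */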
/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct buf *bp, struct ucred *cred, struct thread *td)
{
	struct nfsmount *nmp;
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error;

	nmp = VFSTONFS(bp->b_vp->v_mount);

	/*
	 * Commits are usually short and sweet so let's save some CPU and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return(EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < nfs_numasync; iod++)
		if (nfs_iodwant[iod]) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod) {
		iod = nfs_nfsiodnew();
		if (iod != -1)
			gotiod = TRUE;
	}

	if (gotiod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		nfs_iodwant[iod] = NULL;
		nfs_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&nfs_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
				       "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, td))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
					("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}
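/*
 * Typical caller pattern (cf. nfs_strategy()): hand the buffer to an
 * nfsiod and fall back to synchronous I/O when EIO comes back, roughly
 *
 *	if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp, NOCRED, td))
 *		error = nfs_doio(bp, NOCRED, td);
 *
 * (sketch only; see nfs_vnops.c for the real thing).
 */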
/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));

	if (bp->b_iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;

	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		nfsstats.read_bios++;
		error = (nmp->nm_rpcops->nr_readrpc)(vp, uiop, cr);

		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			int left  = uiop->uio_resid;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
		/* ASSERT_VOP_LOCKED(vp, "nfs_doio"); */
		if (p && (vp->v_vflag & VV_TEXT) &&
		    (np->n_mtime != np->n_vattr.va_mtime.tv_sec)) {
			uprintf("Process killed due to text file modification\n");
			PROC_LOCK(p);
			psignal(p, SIGKILL);
			_PHOLD(p);
			PROC_UNLOCK(p);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = (nmp->nm_rpcops->nr_readlinkrpc)(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
			error = nfs4_readdirrpc(vp, uiop, cr);
		else {
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
		}
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    };
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    bp->b_flags |= B_WRITEINPROG;
		    retv = (nmp->nm_rpcops->nr_commit)(
				bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
				bp->b_wcred, td);
		    bp->b_flags &= ~B_WRITEINPROG;
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
		    if (retv == NFSERR_STALEWRITEVERF) {
			    nfs_clearcommit(bp->b_vp->v_mount);
		    }
	    }

	    /*
	     * Setup for actual write
	     */

	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;

		bp->b_flags |= B_WRITEINPROG;
		error = (nmp->nm_rpcops->nr_writerpc)(vp, uiop, cr, &iomode, &must_commit);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file-case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}
		bp->b_flags &= ~B_WRITEINPROG;
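		/*
		 * This is the two-stage NFSv3 write: data goes out
		 * UNSTABLE (stage 1), and a later commit rpc (stage 2,
		 * the B_NEEDCOMMIT path above) makes it permanent.  For
		 * example, a block written UNSTABLE comes back marked
		 * B_NEEDCOMMIT (plus B_CLUSTEROK if fully dirty); if the
		 * server reboots before the commit, the stale write
		 * verifier (NFSERR_STALEWRITEVERF) forces the data to be
		 * written again from the still-dirty buffer.
		 */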
		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if (error && (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
	    nfs_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize = np->n_size;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	np->n_size = nsize;

	if (np->n_size < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return(error);
}
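/*
 * Example of the truncation arithmetic above: truncating to nsize = 10000
 * with an 8K biosize gives lbn = 1 and bufsize = 1808 (10000 & 8191), so
 * the partial buffer spanning the new EOF is re-fetched at its reduced
 * size and its dirty region clipped before being released.  Illustrative
 * numbers only; biosize comes from the mount.
 */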