/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
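/*
 * All of the requested pages are filled by a single read RPC: the pages
 * are mapped into the KVA of a borrowed pager buffer with pmap_qenter(),
 * filled by ncl_readrpc(), and then validated page by page according to
 * how many bytes the server actually returned.
 */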
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */
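	/*
	 * For example, with 4096-byte pages and a 16384-byte request that
	 * returned 6000 bytes: page 0 (nextoff 4096 <= 6000) becomes fully
	 * valid, page 1 (toff 4096 < 6000) is valid for its first
	 * 6000 - 4096 = 1904 bytes, and pages 2 and 3 are left invalid.
	 */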
	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred we may have hit a zero-fill
			 * section.  We leave valid set to 0, and page
			 * is freed by vm_page_readahead_finish() if
			 * its index is not equal to requested, or
			 * page is zeroed and set valid by
			 * vm_pager_get_pages() for requested page.
			 */
			;
		}
		if (i != ap->a_reqpage)
			vm_page_readahead_finish(m);
	}
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on noncache-able vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
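	/*
	 * As in ncl_getpages() above, the pager buffer is borrowed purely
	 * for its KVA window; the pages are mapped into it and pushed to
	 * the server with a single write RPC.
	 */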
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return (rtvals[0]);
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date.  If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
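	/*
	 * Two cases are handled below: if we have written the file
	 * (NMODIFIED), flush the cache and fetch fresh attributes;
	 * otherwise fetch the attributes first and flush only if the
	 * size or modify time has changed on the server.
	 */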
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/no readaheads.  Just read data into the user buffer. */
		return (ncl_readrpc(vp, uio, cred));

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return (error);

	do {
		u_quad_t nsize;

		mtx_lock(&np->n_mtx);
		nsize = np->n_size;
		mtx_unlock(&np->n_mtx);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(newnfsstats.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) == NULL) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= B_ASYNC;
							rabp->b_iocmd = BIO_READ;
							vfs_busy_pages(rabp, 0);
							if (ncl_asyncio(nmp, rabp, cred, td)) {
								rabp->b_flags |= B_INVAL;
								rabp->b_ioflags |= BIO_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */

			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */

			n = 0;
			if (on < bcount)
				n = MIN((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuck!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size (bp->b_bcount) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state (due to
			 * being VMIO) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		}

		if (n > 0) {
			error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1.  So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed.  The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs.  This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
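/*
 * For example, assuming an nm_wsize of 32768, a single-iovec 100000-byte
 * direct write is sent as three full 32768-byte RPCs followed by one
 * 1696-byte RPC, with uio_offset and uio_resid advanced after each chunk.
 */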
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
			    ("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount, noncontig_write, obcount;
	int bp_cached, n, on, error = 0, error1;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
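	/*
	 * The attribute cache is flushed first so that the VOP_GETATTR()
	 * below fetches the current file size from the server before the
	 * write offset is set to it.
	 */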
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return (nfs_directio_write(vp, uio, cred, ioflag));

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (!(ioflag & IO_SYNC)) {
		int nflag, needrestart = 0;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;

			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
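		/*
		 * Appends (and, with NFSMNT_NONCONTIGWR, writes that extend
		 * the last block) take the first branch below: the buffer is
		 * grabbed at its old size and grown with allocbuf() after
		 * n_size has been updated.  All other writes take the second
		 * branch, which clips bcount to the current EOF.
		 */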
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NHASBEENLOCKED) == 0 &&
		    (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
			noncontig_write = 1;
		else
			noncontig_write = 0;
		if ((uio->uio_offset == np->n_size ||
		    (noncontig_write != 0 &&
		    lbn == (np->n_size / biosize) &&
		    uio->uio_offset + n > np->n_size)) && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			obcount = np->n_size - (lbn * biosize);
			bp = nfs_getcacheblk(vp, lbn, obcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount = on + n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				if (noncontig_write != 0 && on > obcount)
					vfs_bio_bzero_buf(bp, obcount, on -
					    obcount);
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * If there has been a file lock applied to this file
		 * or vfs.nfs.old_noncontig_writing is set, do the following:
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer (and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * If vfs.nfs.old_noncontig_writing is not set and there has
		 * not been file locking done on this file:
		 * Relax coherency a bit for the sake of performance and
		 * expand the current dirty region to contain the new
		 * write even if it means we mark some non-dirty data as
		 * dirty.
		 */

		if (noncontig_write == 0 && bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content than what
			 * possibly was written by the faulty uiomove.
			 * Release it, forgetting the data pages, to
			 * prevent the leak of uninitialized data to
			 * usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Get the partial update on the progress made from
		 * uiomove, if an error occurred.
		 */
		if (error != 0)
			n = local_resid - uio->uio_resid;

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n > 0) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
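		/*
		 * Three dispatch paths: synchronous writes go through
		 * bwrite(), writes that fill the whole block are pushed
		 * asynchronously via ncl_writebp(), and partial blocks
		 * are simply marked delayed-write.
		 */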
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error1 = bwrite(bp);
			if (error1 != 0) {
				if (error == 0)
					error = error1;
				break;
			}
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) ncl_writebp(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}

		if (error != 0)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (error != 0) {
		if (ioflag & IO_UNIT) {
			VATTR_NULL(&vattr);
			vattr.va_size = orig_size;
			/* IO_SYNC is handled implicitly */
			(void)VOP_SETATTR(vp, &vattr, cred);
			uio->uio_offset -= orig_resid - uio->uio_resid;
			uio->uio_resid = orig_resid;
		}
	}

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
		intrflg = 1;
	if (intrflg) {
		slpflag = NFS_PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		ncl_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
	 */
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so lets save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 *
	 * Readdirplus RPCs do vget()s to acquire the vnodes for entries
	 * in the directory in order to update attributes.  This can deadlock
	 * with another thread that is waiting for async I/O to be done by
	 * an nfsiod thread while holding a lock on one of these vnodes.
	 * To avoid this deadlock, don't allow the async nfsiod threads to
	 * perform Readdirplus RPCs.
	 */
	mtx_lock(&ncl_iod_mutex);
	if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > ncl_numasync / 2)) ||
	    (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
		mtx_unlock(&ncl_iod_mutex);
		return (EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = NFS_PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
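	/*
	 * The queue is capped at twice the number of nfsiods; when it is
	 * full we sleep on nm_bufq until the daemons drain it, then re-check
	 * from the top since our iod may have exited in the meantime.
	 */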
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2 * ncl_numasync) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			    slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&ncl_iod_mutex);
					return (error2);
				}
				if (slpflag == NFS_PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&ncl_iod_mutex);
		return (0);
	}

	mtx_unlock(&ncl_iod_mutex);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

void
ncl_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;

	iomode = NFSWRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);

		mtx_lock(&np->n_mtx);
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &ncl_pbuf_freecnt);
}

/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
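/*
 * Reads are dispatched on the vnode type (VREG, VLNK, VDIR); writes
 * first try a commit RPC for B_NEEDCOMMIT buffers and otherwise send
 * the buffer's dirty region, using NFSWRITE_UNSTABLE only for plain
 * async writes.
 */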
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			NFSINCRGLOBAL(newnfsstats.read_bios);
			error = ncl_readrpc(vp, uiop, cr);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no error, we must have
					 * hit a file hole.  We should zero-fill the remainder.
					 * This can also occur if the server hits the file EOF.
					 *
					 * Holes used to be able to occur due to pending
					 * writes, but that is not possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					ssize_t left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread, left);
					uiop->uio_resid = 0;
				}
			}
			/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
			if (p && (vp->v_vflag & VV_TEXT)) {
				mtx_lock(&np->n_mtx);
				if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
					mtx_unlock(&np->n_mtx);
					PROC_LOCK(p);
					killproc(p, "text file modification");
					PROC_UNLOCK(p);
				} else
					mtx_unlock(&np->n_mtx);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			NFSINCRGLOBAL(newnfsstats.readlink_bios);
			error = ncl_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.readdir_bios);
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = ncl_readdirplusrpc(vp, uiop, cr, td);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = ncl_readdirrpc(vp, uiop, cr, td);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			retv = ncl_commit(vp, off, bp->b_dirtyend - bp->b_dirtyoff,
			    bp->b_wcred, td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				bufdone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				ncl_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		mtx_lock(&np->n_mtx);
		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
		mtx_unlock(&np->n_mtx);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			NFSINCRGLOBAL(newnfsstats.write_bios);

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSWRITE_UNSTABLE;
			else
				iomode = NFSWRITE_FILESYNC;

			error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
			    called_from_strategy);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */

			if (!error && iomode == NFSWRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * EIO is returned by ncl_writerpc() to indicate a recoverable
			 * write error and is handled as above, except that
			 * B_EINTR isn't set.  One cause of this is a stale stateid
			 * error for the RPC that indicates recovery is required,
			 * when called with called_from_strategy != 0.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe.  XXX
			 *
			 * The logic below breaks up errors into recoverable and
			 * unrecoverable.  For the former, we clear B_INVAL|B_NOCACHE
			 * and keep the buffer around for potential write retries.
			 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
			 * and save the error in the nfsnode.  This is less than ideal
			 * but necessary.  Keeping such buffers around could potentially
			 * cause buffer exhaustion eventually (they can never be written
			 * out, so they will constantly be re-dirtied).  It also causes
			 * all sorts of vfs panics.  For non-recoverable write errors,
			 * also invalidate the attrcache, so we'll be forced to go over
			 * the wire for this object, returning an error to user on next
			 * call (most of the time).
			 */
			if (error == EINTR || error == EIO || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = np->n_error = error;
					mtx_lock(&np->n_mtx);
					np->n_flag |= NWRITEERR;
					np->n_attrstamp = 0;
					KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
					mtx_unlock(&np->n_mtx);
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */
int
ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_bufobj.bo_bsize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return (EINTR);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;	/* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return (error);
}