nfs_bio.c revision 89407
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_bio.c 89407 2002-01-15 20:57:21Z peter $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsmount.h>
#include <nfsclient/nfsnode.h>

/*
 * Just call nfs_writebp() with the force argument set to 1.
 *
 * NOTE: B_DONE may or may not be set in a_bp on call.
 */
static int
nfs_bwrite(struct buf *bp)
{

	return (nfs_writebp(bp, 1, curthread));
}

struct buf_ops buf_ops_nfs = {
	"buf_ops_nfs",
	nfs_bwrite
};

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
		    struct thread *td);
/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	td = curthread;				/* XXX */
	cred = curthread->td_proc->p_ucred;	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if (vp->v_object == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	{
		vm_page_t m = pages[ap->a_reqpage];

		if (m->valid != 0) {
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
			for (i = 0; i < npages; ++i) {
				if (i != ap->a_reqpage)
					vm_page_free(pages[i]);
			}
			return(0);
		}
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodein++;
	cnt.v_vnodepgsin += npages;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = nfs_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	return 0;
}
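/*
 * Illustrative note (not from the original source): the validation loop
 * above walks the pages in PAGE_SIZE steps.  Assuming a 4096 byte
 * PAGE_SIZE, a two-page read that returns size == 6000 fully validates
 * page 0 (nextoff 4096 <= 6000) and partially validates page 1 via
 * vm_page_set_validclean(m, 0, 6000 - 4096), leaving the last 2192
 * bytes invalid for the pager to zero.
 */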
/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	GIANT_REQUIRED;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_proc->p_ucred;	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		(void)nfs_fsinfo(nmp, vp, cred, td);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += count;

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	error = nfs_writerpc(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit) {
			nfs_clearcommit(vp->v_mount);
		}
	}
	return rtvals[0];
}
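/*
 * Illustrative note (not from the original source): an asynchronous
 * pageout uses NFSV3WRITE_UNSTABLE, so the server may cache the data and
 * a later commit RPC is needed; a synchronous pageout uses
 * NFSV3WRITE_FILESYNC, which forces the data to stable storage before
 * the reply.  When the server's write verifier changes, nfs_writerpc()
 * reports it via must_commit and nfs_clearcommit() knocks down any
 * B_NEEDCOMMIT buffers so their data gets written again.
 */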
/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp = 0, *rabp;
	struct vattr vattr;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> 16) * biosize / BKVASIZE);
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date.  If you find that you need
	 * current attributes, this could be forced by setting n_attrstamp
	 * to 0 before the VOP_GETATTR() call.
	 */
	if (np->n_flag & NMODIFIED) {
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		if (np->n_mtime != vattr.va_mtime.tv_sec) {
			if (vp->v_type == VDIR)
				nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		}
	}
	do {
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
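		/*
		 * Illustrative note (not from the original source): with a
		 * biosize of 8192, a read at uio_offset 10000 yields
		 * lbn = 10000 / 8192 = 1 and on = 10000 & 8191 = 1808,
		 * i.e. logical block 1 starting 1808 bytes in.  The mask
		 * works because f_iosize is a power of 2.
		 */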
		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/*
		 * Obtain the buffer cache block.  Figure out the buffer size
		 * when we are at EOF.  If we are modifying the size of the
		 * buffer based on an EOF condition we need to hold
		 * nfs_rslock() through obtaining the buffer to prevent
		 * a potential writer-appender from messing with n_size.
		 * Otherwise we may accidentally truncate the buffer and
		 * lose dirty data.
		 *
		 * Note that bcount is *not* DEV_BSIZE aligned.
		 */

again:
		bcount = biosize;
		if ((off_t)lbn * biosize >= np->n_size) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
			bcount = np->n_size - (off_t)lbn * biosize;
		}
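		/*
		 * Illustrative note (not from the original source): with a
		 * biosize of 8192 and n_size of 20000, lbn 2 covers offsets
		 * 16384-24575, so bcount becomes 20000 - 16384 = 3616 and,
		 * since bcount != biosize, the rslock is taken below before
		 * the buffer is obtained.
		 */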
		if (bcount != biosize) {
		    switch(nfs_rslock(np, td)) {
		    case ENOLCK:
			goto again;
			/* not reached */
		    case EINTR:
		    case ERESTART:
			return(EINTR);
			/* not reached */
		    default:
			break;
		    }
		}

		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (bcount != biosize)
			nfs_rsunlock(np, td);
		if (!bp)
			return (EINTR);

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
			return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			printf("got bad cookie vp %p bp %p\n", vp, bp);
			nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, 0, cred, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server.  The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp)
				return (EINTR);
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie
		     * error, we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    !incore(vp, lbn + 1)) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	int haverslock = 0;
	struct proc *p = td ? td->td_proc : NULL;

	GIANT_REQUIRED;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, td);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, td, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, td);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
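	/*
	 * Illustrative note (not from the original source): two O_APPEND
	 * writers can arrive with the same n_size.  Serializing on the
	 * rslock means the second appender recomputes its offset after the
	 * first has extended the file, so neither write lands on top of
	 * the other.
	 */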
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch(nfs_rslock(np, td)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EINTR:
		case ERESTART:
			return(EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	    p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		PROC_LOCK(p);
		psignal(p, SIGXFSZ);
		PROC_UNLOCK(p);
		if (haverslock)
			nfs_rsunlock(np, td);
		return (EFBIG);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;

	do {
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */

		if (uio->uio_offset == np->n_size && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				bp->b_magic = B_MAGIC_NFS;
				bp->b_op = &buf_ops_nfs;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
		}
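		/*
		 * Illustrative note (not from the original source): for a
		 * pure append with biosize 8192 and n_size 10000, lbn is 1
		 * and on is 1808, so the buffer is first obtained at its
		 * pre-append size of 1808 bytes and then grown with
		 * allocbuf() to 1808 + n, preserving whatever B_CACHE state
		 * it had before the resize.
		 */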
		if (!bp) {
			error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thus,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(bp, cred, td);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (!bp) {
			error = EINTR;
			break;
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		np->n_flag |= NMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
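		/*
		 * Illustrative note (not from the original source): if the
		 * buffer is already dirty over [1000, 2000) and the new
		 * write covers [3000, 3500), then on (3000) is beyond
		 * b_dirtyend (2000), so the old dirty range is pushed with
		 * BUF_WRITE() and the write is retried on a clean buffer
		 * rather than merging across the untouched hole.
		 */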
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (BUF_WRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}
		/*
		 * If IO_NOWDRAIN then set B_NOWDRAIN (nfs-backed MD
		 * filesystem)
		 */
		if (ioflag & IO_NOWDRAIN)
			bp->b_flags |= B_NOWDRAIN;

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = BUF_WRITE(bp);
			if (error)
				break;
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (haverslock)
		nfs_rsunlock(np, td);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0);
	}

	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
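		/*
		 * Illustrative note (not from the original source): with
		 * DEV_BSIZE 512 and an 8192 byte biosize, logical block 3
		 * maps to b_blkno 3 * 16 = 48, i.e. the device-block index
		 * the rest of the buffer cache expects.
		 */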
	}
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred,
    struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if (vp->v_flag & VXLOCK) {
		return (0);
	}

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
		    slptimeo);
		if (error && intrflg &&
		    nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, td, slpflag, 0);
	while (error) {
		if (intrflg &&
		    nfs_sigintr(nmp, (struct nfsreq *)0, td->td_proc)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, td, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(struct buf *bp, struct ucred *cred, struct thread *td)
{
	struct nfsmount *nmp;
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error;

	nmp = VFSTONFS(bp->b_vp->v_mount);

	/*
	 * Commits are usually short and sweet so let's save some CPU and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return(EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < nfs_numasync; iod++)
		if (nfs_iodwant[iod]) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod) {
		iod = nfs_nfsiodnew();
		if (iod != -1)
			gotiod = TRUE;
	}

	if (gotiod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("nfs_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		nfs_iodwant[iod] = (struct proc *)0;
		nfs_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup((caddr_t)&nfs_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("nfs_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
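		/*
		 * Illustrative note (not from the original source): with,
		 * say, four iods running, the queue is capped at eight
		 * buffers; a producer that finds it full sleeps on nm_bufq
		 * until an iod drains it, then rechecks that the mount
		 * still has an iod before queueing.
		 */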
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
			    ("nfs_asyncio: waiting for mount %p queue to drain\n",
			    nmp));
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
			    "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL,
				    td ? td->td_proc : NULL))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
				    ("nfs_asyncio: no iods after mount %p queue was drained, looping\n",
				    nmp));
				goto again;
			}
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}
/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct ucred *cr, struct thread *td)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE),
	    ("nfs_doio: bp %p already marked done", bp));

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		/* mapping was done by vmapbuf() */
		io.iov_base = bp->b_data;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		if (bp->b_iocmd == BIO_READ) {
			uiop->uio_rw = UIO_READ;
			nfsstats.read_physios++;
			error = nfs_readrpc(vp, uiop, cr);
		} else {
			int com;

			iomode = NFSV3WRITE_DATASYNC;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_physios++;
			error = nfs_writerpc(vp, uiop, cr, &iomode, &com);
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			nfsstats.read_bios++;
			error = nfs_readrpc(vp, uiop, cr);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no
					 * error, we must have hit a file
					 * hole.  We should zero-fill the
					 * remainder.  This can also occur if
					 * the server hits the file EOF.
					 *
					 * Holes used to be able to occur due
					 * to pending writes, but that is not
					 * possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					int left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread,
						    left);
					uiop->uio_resid = 0;
				}
			}
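			/*
			 * Illustrative note (not from the original source):
			 * a read of an 8192 byte block near EOF for which
			 * the server returns only 3000 bytes leaves
			 * uio_resid == 5192, so the remaining 5192 bytes
			 * are bzero()ed and uio_resid is cleared; the buffer
			 * then looks fully valid to the cache.
			 */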
			if (p && (vp->v_flag & VTEXT) &&
			    (np->n_mtime != np->n_vattr.va_mtime.tv_sec)) {
				uprintf("Process killed due to text file modification\n");
				PROC_LOCK(p);
				psignal(p, SIGKILL);
				_PHOLD(p);
				PROC_UNLOCK(p);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			nfsstats.readlink_bios++;
			error = nfs_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
			/*
			 * end-of-directory sets B_INVAL but does not generate
			 * an error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			printf("nfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			bp->b_flags |= B_WRITEINPROG;
			retv = nfs_commit(bp->b_vp, off,
			    bp->b_dirtyend - bp->b_dirtyoff, bp->b_wcred, td);
			bp->b_flags &= ~B_WRITEINPROG;
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				bufdone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				nfs_clearcommit(bp->b_vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */

		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_bios++;

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSV3WRITE_UNSTABLE;
			else
				iomode = NFSV3WRITE_FILESYNC;

			bp->b_flags |= B_WRITEINPROG;
			error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */

			if (!error && iomode == NFSV3WRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}
			bp->b_flags &= ~B_WRITEINPROG;

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe. XXX
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				s = splbio();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if (error && (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				splx(s);
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_error = np->n_error = error;
					np->n_flag |= NWRITEERR;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
nfs_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td,
    u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize = np->n_size;
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	int error = 0;

	np->n_size = nsize;

	if (np->n_size < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, td, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize & (biosize - 1);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (bp == NULL)		/* can fail on interruptible mounts */
			return (EINTR);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;	/* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return(error);
}