nfs_bio.c revision 34266
1/* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95 37 * $Id: nfs_bio.c,v 1.52 1998/03/07 21:36:01 dyson Exp $ 38 */ 39 40 41#include <sys/param.h> 42#include <sys/systm.h> 43#include <sys/resourcevar.h> 44#include <sys/signalvar.h> 45#include <sys/proc.h> 46#include <sys/buf.h> 47#include <sys/vnode.h> 48#include <sys/mount.h> 49#include <sys/kernel.h> 50 51#include <vm/vm.h> 52#include <vm/vm_extern.h> 53#include <vm/vm_prot.h> 54#include <vm/vm_page.h> 55#include <vm/vm_object.h> 56#include <vm/vm_pager.h> 57#include <vm/vnode_pager.h> 58 59#include <nfs/rpcv2.h> 60#include <nfs/nfsproto.h> 61#include <nfs/nfs.h> 62#include <nfs/nfsmount.h> 63#include <nfs/nqnfs.h> 64#include <nfs/nfsnode.h> 65 66static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size, 67 struct proc *p)); 68static void nfs_prot_buf __P((struct buf *bp, int off, int n)); 69 70extern int nfs_numasync; 71extern struct nfsstats nfsstats; 72 73/* 74 * Vnode op for VM getpages. 75 */ 76int 77nfs_getpages(ap) 78 struct vop_getpages_args *ap; 79{ 80 int i, error, nextoff, size, toff, npages; 81 struct uio uio; 82 struct iovec iov; 83 vm_page_t m; 84 vm_offset_t kva; 85 struct buf *bp; 86 87 if ((ap->a_vp->v_object) == NULL) { 88 printf("nfs_getpages: called with non-merged cache vnode??\n"); 89 return EOPNOTSUPP; 90 } 91 92 /* 93 * We use only the kva address for the buffer, but this is extremely 94 * convienient and fast. 
95 */ 96 bp = getpbuf(); 97 98 npages = btoc(ap->a_count); 99 kva = (vm_offset_t) bp->b_data; 100 pmap_qenter(kva, ap->a_m, npages); 101 102 iov.iov_base = (caddr_t) kva; 103 iov.iov_len = ap->a_count; 104 uio.uio_iov = &iov; 105 uio.uio_iovcnt = 1; 106 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex); 107 uio.uio_resid = ap->a_count; 108 uio.uio_segflg = UIO_SYSSPACE; 109 uio.uio_rw = UIO_READ; 110 uio.uio_procp = curproc; 111 112 error = nfs_readrpc(ap->a_vp, &uio, curproc->p_ucred); 113 pmap_qremove(kva, npages); 114 115 relpbuf(bp); 116 117 if (error && (uio.uio_resid == ap->a_count)) 118 return VM_PAGER_ERROR; 119 120 size = ap->a_count - uio.uio_resid; 121 122 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { 123 vm_page_t m; 124 nextoff = toff + PAGE_SIZE; 125 m = ap->a_m[i]; 126 127 m->flags &= ~PG_ZERO; 128 129 if (nextoff <= size) { 130 m->valid = VM_PAGE_BITS_ALL; 131 m->dirty = 0; 132 } else { 133 int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1); 134 vm_page_set_validclean(m, 0, nvalid); 135 } 136 137 if (i != ap->a_reqpage) { 138 /* 139 * Whether or not to leave the page activated is up in 140 * the air, but we should put the page on a page queue 141 * somewhere (it already is in the object). Result: 142 * It appears that emperical results show that 143 * deactivating pages is best. 144 */ 145 146 /* 147 * Just in case someone was asking for this page we 148 * now tell them that it is ok to use. 149 */ 150 if (!error) { 151 if (m->flags & PG_WANTED) 152 vm_page_activate(m); 153 else 154 vm_page_deactivate(m); 155 PAGE_WAKEUP(m); 156 } else { 157 vnode_pager_freepage(m); 158 } 159 } 160 } 161 return 0; 162} 163 164/* 165 * Vnode op for VM putpages. 
166 */ 167int 168nfs_putpages(ap) 169 struct vop_putpages_args *ap; 170{ 171 struct uio uio; 172 struct iovec iov; 173 vm_page_t m; 174 vm_offset_t kva; 175 struct buf *bp; 176 int iomode, must_commit, i, error, npages; 177 int *rtvals; 178 179 rtvals = ap->a_rtvals; 180 181 npages = btoc(ap->a_count); 182 183 for (i = 0; i < npages; i++) { 184 rtvals[i] = VM_PAGER_AGAIN; 185 } 186 187 /* 188 * We use only the kva address for the buffer, but this is extremely 189 * convienient and fast. 190 */ 191 bp = getpbuf(); 192 193 kva = (vm_offset_t) bp->b_data; 194 pmap_qenter(kva, ap->a_m, npages); 195 196 iov.iov_base = (caddr_t) kva; 197 iov.iov_len = ap->a_count; 198 uio.uio_iov = &iov; 199 uio.uio_iovcnt = 1; 200 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex); 201 uio.uio_resid = ap->a_count; 202 uio.uio_segflg = UIO_SYSSPACE; 203 uio.uio_rw = UIO_WRITE; 204 uio.uio_procp = curproc; 205 206 if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0) 207 iomode = NFSV3WRITE_UNSTABLE; 208 else 209 iomode = NFSV3WRITE_FILESYNC; 210 211 error = nfs_writerpc(ap->a_vp, &uio, 212 curproc->p_ucred, &iomode, &must_commit); 213 214 pmap_qremove(kva, npages); 215 relpbuf(bp); 216 217 if (!error) { 218 int nwritten = round_page(ap->a_count - uio.uio_resid) / PAGE_SIZE; 219 for (i = 0; i < nwritten; i++) { 220 rtvals[i] = VM_PAGER_OK; 221 ap->a_m[i]->dirty = 0; 222 } 223 if (must_commit) 224 nfs_clearcommit(ap->a_vp->v_mount); 225 } 226 return ap->a_rtvals[0]; 227} 228 229/* 230 * Vnode op for read using bio 231 * Any similarity to readip() is purely coincidental 232 */ 233int 234nfs_bioread(vp, uio, ioflag, cred, getpages) 235 register struct vnode *vp; 236 register struct uio *uio; 237 int ioflag; 238 struct ucred *cred; 239 int getpages; 240{ 241 register struct nfsnode *np = VTONFS(vp); 242 register int biosize, diff, i; 243 struct buf *bp = 0, *rabp; 244 struct vattr vattr; 245 struct proc *p; 246 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 247 daddr_t lbn, rabn; 248 int bufsize; 249 int 
nra, error = 0, n = 0, on = 0, not_readin;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	biosize = vp->v_mount->mnt_stat.f_iosize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) {
		if (np->n_flag & NMODIFIED) {
			/* We wrote the file: directories get flushed here,
			 * regular-file flushing is deferred to getattr. */
			if (vp->v_type != VREG) {
				if (vp->v_type != VDIR)
					panic("nfs: bioread, not dir");
				nfs_invaldir(vp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			/* Server-side modification: toss cached data. */
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR)
					nfs_invaldir(vp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}
	/*
	 * Main transfer loop: each iteration copies out of (at most) one
	 * cache block, until the request is satisfied or EOF.
	 */
	do {

	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
		    do {
			error = nqnfs_getlease(vp, ND_READ, cred, p);
		    } while (error == NQNFS_EXPIRED);
		    if (error)
			return (error);
		    if (np->n_lrev != np->n_brev ||
			(np->n_flag & NQNFSNONCACHE) ||
			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR)
			    nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
			    return (error);
			np->n_brev = np->n_lrev;
		    }
		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
		    nfs_invaldir(vp);
		    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
		    if (error)
			return (error);
		}
	    }
	    /* Non-cachable lease: bypass the buffer cache entirely for
	     * regular files and symlinks. */
	    if (np->n_flag & NQNFSNONCACHE) {
		switch (vp->v_type) {
		case VREG:
			return (nfs_readrpc(vp, uio, cred));
		case VLNK:
			return (nfs_readlinkrpc(vp, uio, cred));
		case VDIR:
			break;
		default:
			printf(" NQNFSNONCACHE: type %x unexpected\n",
				vp->v_type);
		};
	    }
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		not_readin = 1;

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred)) {
				    /* No iod available: drop the block
				     * rather than read it synchronously. */
				    rabp->b_flags |= B_INVAL|B_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else
				brelse(rabp);
			}
		    }
		}

		/*
		 * If the block is in the cache and has the required data
		 * in a valid region, just copy it out.
		 * Otherwise, get the block and write back/read in,
		 * as required.
		 */
again:
		/* Shrink the buffer for the last block of the file,
		 * rounded up to a DEV_BSIZE boundary. */
		bufsize = biosize;
		if ((off_t)(lbn + 1) * biosize > np->n_size &&
		    (off_t)(lbn + 1) * biosize - np->n_size < biosize) {
			bufsize = np->n_size - lbn * biosize;
			bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		}
		bp = nfs_getcacheblk(vp, lbn, bufsize, p);
		if (!bp)
			return (EINTR);
		/*
		 * If we are being called from nfs_getpages, we must
		 * make sure the buffer is a vmio buffer.  The vp will
		 * already be setup for vmio but there may be some old
		 * non-vmio buffers attached to it.
		 */
		if (getpages && !(bp->b_flags & B_VMIO)) {
#ifdef DIAGNOSTIC
			printf("nfs_bioread: non vmio buf found, discarding\n");
#endif
			bp->b_flags |= B_NOCACHE;
			bp->b_flags |= B_INVAFTERWRITE;
			if (bp->b_dirtyend > 0) {
				if ((bp->b_flags & B_DELWRI) == 0)
					panic("nfsbioread");
				if (VOP_BWRITE(bp) == EINTR)
					return (EINTR);
			} else
				brelse(bp);
			goto again;
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_flags |= B_READ;
		    bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		    not_readin = 0;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, p);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}
		/* Bytes available in this block starting at offset 'on',
		 * clipped to the request and to EOF. */
		if (bufsize > on) {
			n = min((unsigned)(bufsize - on), uio->uio_resid);
		} else {
			n = 0;
		}
		diff = np->n_size - uio->uio_offset;
		if (diff < n)
			n = diff;
		/*
		 * Block came from the cache (we did not read it in): verify
		 * that its valid region actually covers [on, on+n); if not,
		 * discard it and retry, forcing a real read.
		 */
		if (not_readin && n > 0) {
			if (on < bp->b_validoff || (on + n) > bp->b_validend) {
				bp->b_flags |= B_NOCACHE;
				bp->b_flags |= B_INVAFTERWRITE;
				if (bp->b_dirtyend > 0) {
				    if ((bp->b_flags & B_DELWRI) == 0)
					panic("nfsbioread");
				    if (VOP_BWRITE(bp) == EINTR)
					return (EINTR);
				} else
				    brelse(bp);
				goto again;
			}
		}
		vp->v_lastr = lbn;
		diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
		if (diff < n)
			n = diff;
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_flags |= B_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, p);
		    if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
			return (0);
		}
		lbn = uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, p);
		if (!bp)
		    return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_flags |= B_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, p);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, 0, cred, p, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, p);
			    if (!bp)
				return (EINTR);
			    if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				vfs_busy_pages(bp, 0);
				error = nfs_doio(bp, cred, p);
				if (error) {
				    brelse(bp);
				} else if (i < lbn) {
				    /* Intermediate block, only needed for
				     * its cookies: release it. */
				    brelse(bp);
				}
			    }
			}
		    }
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 * directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    !(np->n_flag & NQNFSNONCACHE) &&
		    !incore(vp, lbn + 1)) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, p);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred)) {
				    /* No iod available: drop the block. */
				    rabp->b_flags |= B_INVAL|B_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Make sure we use a signed variant of min() since
		 * the second term may be negative.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n",vp->v_type);
		break;
	    };

	    /* Copy the selected range out to the caller. */
	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		/* Symlinks are read in one shot; force loop exit. */
		n = 0;
		break;
	    case VDIR:
		if (np->n_flag & NQNFSNONCACHE)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n",vp->v_type);
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Revoke all VM access to the pages backing the byte range [off, off + n)
 * of a VMIO buffer, so that subsequent mapped accesses must fault.
 * No-op for non-VMIO buffers.
 */
static void
nfs_prot_buf(bp, off, n)
	struct buf *bp;
	int off;
	int n;
{
	int pindex, boff, end;

	if ((bp->b_flags & B_VMIO) == 0)
		return;

	end = round_page(off + n);
	for (boff = trunc_page(off); boff < end; boff += PAGE_SIZE) {
		pindex = boff >> PAGE_SHIFT;
		vm_page_protect(bp->b_pages[pindex], VM_PROT_NONE);
	}
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register int biosize;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	register struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	register struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bufsize;
	int n, on, error = 0, iomode, must_commit;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	/* Report (and clear) any asynchronous write error left by nfs_doio. */
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			/* Refetch attributes so the append offset is the
			 * server's idea of the file size. */
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, i don't think it matters
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}
	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = vp->v_mount->mnt_stat.f_iosize;
	do {
		/*
		 * Check for a valid write lease.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
		/* Non-cachable lease: write straight through, synchronously. */
		if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
		    iomode = NFSV3WRITE_FILESYNC;
		    error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit);
		    if (must_commit)
			nfs_clearcommit(vp->v_mount);
		    return (error);
		}
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/* Retried after a forced flush of a non-contiguous dirty
		 * region or after the lease changed under us. */
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			np->n_flag |= NMODIFIED;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		}
		bufsize = biosize;
		if ((lbn + 1) * biosize > np->n_size) {
			bufsize = np->n_size - lbn * biosize;
			bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		}
		bp = nfs_getcacheblk(vp, lbn, bufsize, p);
		if (!bp)
			return (EINTR);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}
		np->n_flag |= NMODIFIED;

		/* Clip the dirty region so it cannot extend past EOF. */
		if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
		}

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		/*
		 * Check for valid write lease and get one as required.
		 * In case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
				goto again;
			}
		}

		error = uiomove((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			return (error);
		}

		/*
		 * This will keep the buffer and mmaped regions more coherent.
		 */
		nfs_prot_buf(bp, on, n);

		/* Extend (or establish) the dirty region to cover [on, on+n). */
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
		/* Keep the valid region consistent with the dirty region. */
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */
		bp->b_flags &= ~B_NEEDCOMMIT;

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			bp->b_proc = p;
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_INVAL;
			error = VOP_BWRITE(bp);
			if (error)
				return (error);
			if (np->n_flag & NQNFSNONCACHE) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
		} else if ((n + on) == biosize &&
			(nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			/* Full block written: push it asynchronously. */
			bp->b_proc = (struct proc *)0;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else
			/* Partial block: leave it as a delayed write. */
			bdwrite(bp);
	} while (uio->uio_resid > 0 && n > 0);
	return (0);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
static struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	register struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		/* Interruptible mount: poll getblk with a timeout so a
		 * pending signal can abort the wait. */
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);

	if( vp->v_type == VREG) {
		/* Map the logical block number to DEV_BSIZE units. */
		int biosize;
		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = (bn * biosize) / DEV_BSIZE;
	}

	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	register struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	/* NOTE(review): vnode is exclusively locked (presumably being
	 * reclaimed) -- returns success without flushing; confirm intent. */
	if (vp->v_flag & VXLOCK) {
		return (0);
	}

	/* Only honor intrflg on interruptible mounts. */
	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
	while (error) {
		/* Interrupted: clear the in-progress flag and wake any
		 * waiters before bailing out. */
		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		/* Retry uninterruptibly, with a timeout. */
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
int
nfs_asyncio(bp, cred)
	register struct buf *bp;
	struct ucred *cred;
{
	struct nfsmount *nmp;
	int i;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waking iod %d for mount %p\n",
				 i, nmp));
			nfs_iodwant[i] = (struct proc *)0;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&nfs_iodwant[i]);
			gotiod = TRUE;
			break;
		}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
				       "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, bp->b_proc))
					return (EINTR);
				/* After one interruptible wait, fall back to
				 * a plain timed sleep. */
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
					("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		/* Hand our credentials to the buffer for the iod to use. */
		if (bp->b_flags & B_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			}
		}

		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, cr, p)
	register struct buf *bp;
	struct ucred *cr;
	struct proc *p;
{
	register struct uio *uiop;
	register struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		/* mapping was done by vmapbuf() */
		io.iov_base = bp->b_data;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		if (bp->b_flags & B_READ) {
			uiop->uio_rw = UIO_READ;
			nfsstats.read_physios++;
			error = nfs_readrpc(vp, uiop, cr);
		} else {
			int com;

			iomode = NFSV3WRITE_DATASYNC;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_physios++;
			error = nfs_writerpc(vp, uiop, cr, &iomode, &com);
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else if (bp->b_flags & B_READ) {
		/* Read case: fill the whole buffer from the server. */
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			nfsstats.read_bios++;
			error = nfs_readrpc(vp, uiop, cr);
			if (!error) {
			    bp->b_validoff = 0;
			    if (uiop->uio_resid) {
				/*
				 * If len > 0, there is a hole in the file and
				 * no writes after the hole have been pushed to
				 * the server yet.
				 * Just zero fill the rest of the valid area.
				 */
				diff = bp->b_bcount - uiop->uio_resid;
				len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE
					+ diff);
				if (len > 0) {
				    len = min(len, uiop->uio_resid);
				    bzero((char *)bp->b_data + diff, len);
				    bp->b_validend = diff + len;
				} else
				    bp->b_validend = diff;
			    } else
				bp->b_validend = bp->b_bcount;
			}
			/* Kill a process whose executable's backing file was
			 * modified out from under it. */
			if (p && (vp->v_flag & VTEXT) &&
				(((nmp->nm_flag & NFSMNT_NQNFS) &&
				  NQNFS_CKINVALID(vp, np, ND_READ) &&
				  np->n_lrev != np->n_brev) ||
				 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
				  np->n_mtime != np->n_vattr.va_mtime.tv_sec))) {
				uprintf("Process killed due to text file modification\n");
				psignal(p, SIGKILL);
				p->p_flag |= P_NOSWAP;
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			nfsstats.readlink_bios++;
			error = nfs_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				/* Server lacks READDIRPLUS: drop to plain
				 * READDIR for the rest of this mount. */
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
			break;
		default:
			printf("nfs_doio: type %x unexpected\n",vp->v_type);
			break;
		};
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else {
		/* Write case: push the dirty byte range, if any. */
		if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
				- bp->b_dirtyoff;
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
				+ bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_bios++;
			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
			    iomode = NFSV3WRITE_UNSTABLE;
			else
			    iomode = NFSV3WRITE_FILESYNC;
			bp->b_flags |= B_WRITEINPROG;
			error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
			if (!error && iomode == NFSV3WRITE_UNSTABLE) {
			    /* Unstable write: a COMMIT is still required. */
			    bp->b_flags |= B_NEEDCOMMIT;
			    if (bp->b_dirtyoff == 0
				&& bp->b_dirtyend == bp->b_bufsize)
				bp->b_flags |= B_CLUSTEROK;
			} else
			    bp->b_flags &= ~B_NEEDCOMMIT;
			bp->b_flags &= ~B_WRITEINPROG;

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set B_ERROR and report the interruption
			 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused. This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				int s;

				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				++numdirtybuffers;
				bp->b_flags |= B_DELWRI;
				s = splbio();
				reassignbuf(bp, vp);
				splx(s);
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
			} else {
				if (error) {
					/* Record the error on the nfsnode so
					 * the next nfs_write reports it. */
					bp->b_flags |= B_ERROR;
					bp->b_error = np->n_error = error;
					np->n_flag |= NWRITEERR;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			/* Nothing dirty: complete immediately. */
			bp->b_resid = 0;
			biodone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	biodone(bp);
	return (error);
}