/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 *	$Id: nfs_bio.c,v 1.46 1998/01/06 05:21:38 dyson Exp $
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfsnode.h>

static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size,
					struct proc *p));

extern int nfs_numasync;
extern struct nfsstats nfsstats;

/*
 * Vnode op for VM getpages.
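 *
 * The requested page is mapped into kernel space with vm_pager_map_page()
 * and filled by a single PAGE_SIZE read RPC; every other page in ap->a_m
 * is released unread with vnode_pager_freepage(), and an error is reported
 * only when the RPC returned no data at all (uio_resid still a full
 * PAGE_SIZE).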
 */
int
nfs_getpages(ap)
	struct vop_getpages_args *ap;
{
	int i, pcount, error;
	struct uio uio;
	struct iovec iov;
	vm_page_t m;
	vm_offset_t kva;

	if ((ap->a_vp->v_object) == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return EOPNOTSUPP;
	}

	m = ap->a_m[ap->a_reqpage];
	kva = vm_pager_map_page(m);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = PAGE_SIZE;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(m->pindex);
	uio.uio_resid = PAGE_SIZE;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = curproc;

	error = nfs_readrpc(ap->a_vp, &uio, curproc->p_ucred);
	vm_pager_unmap_page(kva);

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	for (i = 0; i < pcount; i++) {
		if (i != ap->a_reqpage) {
			vnode_pager_freepage(ap->a_m[i]);
		}
	}

	if (error && (uio.uio_resid == PAGE_SIZE))
		return VM_PAGER_ERROR;
	return 0;
}

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, getpages)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
	int getpages;
{
	register struct nfsnode *np = VTONFS(vp);
	register int biosize, diff, i;
	struct buf *bp = 0, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bufsize;
	int nra, error = 0, n = 0, on = 0, not_readin;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	biosize = vp->v_mount->mnt_stat.f_iosize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
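	 *
	 * Concretely, the code below does one of two things for plain nfs:
	 * if NMODIFIED is set (we have written the file), directories are
	 * invalidated and a fresh getattr is forced by zeroing n_attrstamp;
	 * otherwise the (possibly cached) attributes are fetched and all
	 * cached data is flushed whenever the saved n_mtime no longer
	 * matches the server's modify time.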
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) {
		if (np->n_flag & NMODIFIED) {
			if (vp->v_type != VREG) {
				if (vp->v_type != VDIR)
					panic("nfs: bioread, not dir");
				nfs_invaldir(vp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR)
					nfs_invaldir(vp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}
	do {

	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
		    do {
			error = nqnfs_getlease(vp, ND_READ, cred, p);
		    } while (error == NQNFS_EXPIRED);
		    if (error)
			return (error);
		    if (np->n_lrev != np->n_brev ||
			(np->n_flag & NQNFSNONCACHE) ||
			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR)
			    nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
			    return (error);
			np->n_brev = np->n_lrev;
		    }
		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
		    nfs_invaldir(vp);
		    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
		    if (error)
			return (error);
		}
	    }
	    if (np->n_flag & NQNFSNONCACHE) {
		switch (vp->v_type) {
		case VREG:
			return (nfs_readrpc(vp, uio, cred));
		case VLNK:
			return (nfs_readlinkrpc(vp, uio, cred));
		case VDIR:
			break;
		default:
			printf(" NQNFSNONCACHE: type %x unexpected\n",
				vp->v_type);
		};
	    }
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		not_readin = 1;

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred)) {
				    rabp->b_flags |= B_INVAL|B_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else
				brelse(rabp);
			}
		    }
		}

		/*
		 * If the block is in the cache and has the required data
		 * in a valid region, just copy it out.
		 * Otherwise, get the block and write back/read in,
		 * as required.
		 */
again:
		bufsize = biosize;
		if ((off_t)(lbn + 1) * biosize > np->n_size &&
		    (off_t)(lbn + 1) * biosize - np->n_size < biosize) {
			bufsize = np->n_size - lbn * biosize;
			bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		}
		bp = nfs_getcacheblk(vp, lbn, bufsize, p);
		if (!bp)
			return (EINTR);
		/*
		 * If we are being called from nfs_getpages, we must
		 * make sure the buffer is a vmio buffer.  The vp will
		 * already be setup for vmio but there may be some old
		 * non-vmio buffers attached to it.
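		 *
		 * Such a stale buffer is flagged B_NOCACHE and
		 * B_INVAFTERWRITE so it will be discarded; if it holds
		 * delayed-write data, the data is pushed back
		 * synchronously first, and then the block is re-fetched
		 * via "again".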
		 */
		if (getpages && !(bp->b_flags & B_VMIO)) {
#ifdef DIAGNOSTIC
			printf("nfs_bioread: non vmio buf found, discarding\n");
#endif
			bp->b_flags |= B_NOCACHE;
			bp->b_flags |= B_INVAFTERWRITE;
			if (bp->b_dirtyend > 0) {
				if ((bp->b_flags & B_DELWRI) == 0)
					panic("nfsbioread");
				if (VOP_BWRITE(bp) == EINTR)
					return (EINTR);
			} else
				brelse(bp);
			goto again;
		}
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_flags |= B_READ;
			bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			not_readin = 0;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(bp, cred, p);
			if (error) {
			    brelse(bp);
			    return (error);
			}
		}
		if (bufsize > on) {
			n = min((unsigned)(bufsize - on), uio->uio_resid);
		} else {
			n = 0;
		}
		diff = np->n_size - uio->uio_offset;
		if (diff < n)
			n = diff;
		if (not_readin && n > 0) {
			if (on < bp->b_validoff || (on + n) > bp->b_validend) {
				bp->b_flags |= B_NOCACHE;
				bp->b_flags |= B_INVAFTERWRITE;
				if (bp->b_dirtyend > 0) {
				    if ((bp->b_flags & B_DELWRI) == 0)
					panic("nfsbioread");
				    if (VOP_BWRITE(bp) == EINTR)
					return (EINTR);
				} else
				    brelse(bp);
				goto again;
			}
		}
		vp->v_lastr = lbn;
		diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
		if (diff < n)
			n = diff;
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_flags |= B_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(bp, cred, p);
			if (error) {
				bp->b_flags |= B_ERROR;
				brelse(bp);
				return (error);
			}
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
			return (0);
		}
		lbn = uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, p);
		if (!bp)
		    return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_flags |= B_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, p);
		    while (error == NFSERR_BAD_COOKIE) {
			nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, 0, cred, p, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, p);
			    if (!bp)
				return (EINTR);
			    if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				vfs_busy_pages(bp, 0);
				error = nfs_doio(bp, cred, p);
				if (error) {
				    brelse(bp);
				} else if (i < lbn) {
				    brelse(bp);
				}
			    }
			}
			if (error)
			    return (error);
		    }
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
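		 *
		 * Unlike regular files, directory blocks cannot be
		 * addressed by byte offset alone: each readdir reply
		 * carries the cookie at which the next request must
		 * start, so at most one block of read ahead is possible
		 * here.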
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    !(np->n_flag & NQNFSNONCACHE) &&
		    !incore(vp, lbn + 1)) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, p);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred)) {
				    rabp->b_flags |= B_INVAL|B_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Make sure we use a signed variant of min() since
		 * the second term may be negative.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
		break;
	    };

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		if (np->n_flag & NQNFSNONCACHE)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n", vp->v_type);
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register int biosize;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	register struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	register struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bufsize;
	int n, on, error = 0, iomode, must_commit;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}
	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = vp->v_mount->mnt_stat.f_iosize;
	do {
		/*
		 * Check for a valid write lease.
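		 *
		 * For NQNFS this means calling nqnfs_getlease() until it
		 * stops returning NQNFS_EXPIRED; if the lease revision on
		 * the server (n_lrev) differs from the one the cached
		 * buffers were written under (n_brev), the cache is stale
		 * and is flushed before the write goes ahead.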
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
		if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
		    iomode = NFSV3WRITE_FILESYNC;
		    error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit);
		    if (must_commit)
			nfs_clearcommit(vp->v_mount);
		    return (error);
		}
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			np->n_flag |= NMODIFIED;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		}
		bufsize = biosize;
		if ((lbn + 1) * biosize > np->n_size) {
			bufsize = np->n_size - lbn * biosize;
			bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		}
		bp = nfs_getcacheblk(vp, lbn, bufsize, p);
		if (!bp)
			return (EINTR);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}
		np->n_flag |= NMODIFIED;

		if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
			bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
		}

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		/*
		 * Check for valid write lease and get one as required.
		 * In case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
				goto again;
			}
		}
		error = uiomove((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			return (error);
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */
		bp->b_flags &= ~B_NEEDCOMMIT;

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
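		 *
		 * Otherwise, a write that exactly fills the block is
		 * pushed out asynchronously via nfs_writebp(), and a
		 * partial-block write is left as a delayed write
		 * (bdwrite()) so that later writes to the same block can
		 * be coalesced with it.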
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			bp->b_proc = p;
			error = VOP_BWRITE(bp);
			if (error)
				return (error);
			if (np->n_flag & NQNFSNONCACHE) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
		} else if ((n + on) == biosize &&
			(nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_proc = (struct proc *)0;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else
			bdwrite(bp);
	} while (uio->uio_resid > 0 && n > 0);
	return (0);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
static struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	register struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);

	if (vp->v_type == VREG) {
		int biosize;
		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = (bn * biosize) / DEV_BSIZE;
	}

	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	register struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if (vp->v_flag & VXLOCK) {
		return (0);
	}

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
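 *
 * The scheme below first tries to wake an idle nfsiod (nfs_iodwant[])
 * and bind it to this mount; failing that, it settles for an iod that
 * is already working on this mount (nm_bufqiods > 0).  In either case
 * the buffer is queued on nm_bufq, which is never allowed to grow
 * beyond 2 * nfs_numasync entries.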
 */
int
nfs_asyncio(bp, cred)
	register struct buf *bp;
	struct ucred *cred;
{
	struct nfsmount *nmp;
	int i;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waking iod %d for mount %p\n",
				 i, nmp));
			nfs_iodwant[i] = (struct proc *)0;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&nfs_iodwant[i]);
			gotiod = TRUE;
			break;
		}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
				       "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, bp->b_proc))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
					("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		if (bp->b_flags & B_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			}
		}

		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, cr, p)
	register struct buf *bp;
	struct ucred *cr;
	struct proc *p;
{
	register struct uio *uiop;
	register struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
	    /*
	     * ...though reading /dev/drum still gets us here.
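	     *
	     * The B_PHYS case transfers bp->b_bcount bytes directly to
	     * or from bp->b_data (already mapped by vmapbuf()) at byte
	     * offset b_blkno * DEV_BSIZE, as a single read or write RPC.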
	     */
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    /* mapping was done by vmapbuf() */
	    io.iov_base = bp->b_data;
	    uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
	    if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop, cr);
	    } else {
		int com;

		iomode = NFSV3WRITE_DATASYNC;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		error = nfs_writerpc(vp, uiop, cr, &iomode, &com);
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else if (bp->b_flags & B_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop, cr);
		if (!error) {
		    bp->b_validoff = 0;
		    if (uiop->uio_resid) {
			/*
			 * If len > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet.
			 * Just zero fill the rest of the valid area.
			 */
			diff = bp->b_bcount - uiop->uio_resid;
			len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE
				+ diff);
			if (len > 0) {
			    len = min(len, uiop->uio_resid);
			    bzero((char *)bp->b_data + diff, len);
			    bp->b_validend = diff + len;
			} else
			    bp->b_validend = diff;
		    } else
			bp->b_validend = bp->b_bcount;
		}
		if (p && (vp->v_flag & VTEXT) &&
			(((nmp->nm_flag & NFSMNT_NQNFS) &&
			  NQNFS_CKINVALID(vp, np, ND_READ) &&
			  np->n_lrev != np->n_brev) ||
			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			  np->n_mtime != np->n_vattr.va_mtime.tv_sec))) {
			uprintf("Process killed due to text file modification\n");
			psignal(p, SIGKILL);
			p->p_flag |= P_NOSWAP;
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop, cr);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, cr);
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    };
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else {
	    if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;
		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;
		bp->b_flags |= B_WRITEINPROG;
		error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bufsize)
			bp->b_flags |= B_CLUSTEROK;
		} else
		    bp->b_flags &= ~B_NEEDCOMMIT;
		bp->b_flags &= ~B_WRITEINPROG;

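		/*
		 * Note that only plain async writes go out
		 * NFSV3WRITE_UNSTABLE; a successful unstable write leaves
		 * B_NEEDCOMMIT set (and B_CLUSTEROK when the whole buffer
		 * is dirty) so the data can later be committed, or
		 * clustered, instead of being rewritten.
		 */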
		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set B_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			++numdirtybuffers;
			bp->b_flags |= B_DELWRI;
			reassignbuf(bp, vp);
			if ((bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = np->n_error = error;
				np->n_flag |= NWRITEERR;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		biodone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	biodone(bp);
	return (error);
}