/* nfs_vnops.c, revision 36521 */
1/* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95 37 * $Id: nfs_vnops.c,v 1.93 1998/05/31 17:48:05 peter Exp $ 38 */ 39 40 41/* 42 * vnode op calls for Sun NFS version 2 and 3 43 */ 44 45#include "opt_inet.h" 46 47#include <sys/param.h> 48#include <sys/kernel.h> 49#include <sys/systm.h> 50#include <sys/resourcevar.h> 51#include <sys/proc.h> 52#include <sys/mount.h> 53#include <sys/buf.h> 54#include <sys/malloc.h> 55#include <sys/mbuf.h> 56#include <sys/namei.h> 57#include <sys/socket.h> 58#include <sys/vnode.h> 59#include <sys/dirent.h> 60#include <sys/fcntl.h> 61#include <sys/lockf.h> 62 63#include <vm/vm.h> 64#include <vm/vm_extern.h> 65#include <vm/vm_zone.h> 66 67#include <miscfs/fifofs/fifo.h> 68#include <miscfs/specfs/specdev.h> 69 70#include <nfs/rpcv2.h> 71#include <nfs/nfsproto.h> 72#include <nfs/nfs.h> 73#include <nfs/nfsnode.h> 74#include <nfs/nfsmount.h> 75#include <nfs/xdr_subs.h> 76#include <nfs/nfsm_subs.h> 77#include <nfs/nqnfs.h> 78 79#include <net/if.h> 80#include <netinet/in.h> 81#include <netinet/in_var.h> 82 83/* Defs */ 84#define TRUE 1 85#define FALSE 0 86 87/* 88 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these 89 * calls are not in getblk() and brelse() so that they would not be necessary 90 * here. 
91 */ 92#ifndef B_VMIO 93#define vfs_busy_pages(bp, f) 94#endif 95 96static int nfsspec_read __P((struct vop_read_args *)); 97static int nfsspec_write __P((struct vop_write_args *)); 98static int nfsfifo_read __P((struct vop_read_args *)); 99static int nfsfifo_write __P((struct vop_write_args *)); 100static int nfsspec_close __P((struct vop_close_args *)); 101static int nfsfifo_close __P((struct vop_close_args *)); 102#define nfs_poll vop_nopoll 103static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int)); 104static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *)); 105static int nfs_lookup __P((struct vop_lookup_args *)); 106static int nfs_create __P((struct vop_create_args *)); 107static int nfs_mknod __P((struct vop_mknod_args *)); 108static int nfs_open __P((struct vop_open_args *)); 109static int nfs_close __P((struct vop_close_args *)); 110static int nfs_access __P((struct vop_access_args *)); 111static int nfs_getattr __P((struct vop_getattr_args *)); 112static int nfs_setattr __P((struct vop_setattr_args *)); 113static int nfs_read __P((struct vop_read_args *)); 114static int nfs_mmap __P((struct vop_mmap_args *)); 115static int nfs_fsync __P((struct vop_fsync_args *)); 116static int nfs_remove __P((struct vop_remove_args *)); 117static int nfs_link __P((struct vop_link_args *)); 118static int nfs_rename __P((struct vop_rename_args *)); 119static int nfs_mkdir __P((struct vop_mkdir_args *)); 120static int nfs_rmdir __P((struct vop_rmdir_args *)); 121static int nfs_symlink __P((struct vop_symlink_args *)); 122static int nfs_readdir __P((struct vop_readdir_args *)); 123static int nfs_bmap __P((struct vop_bmap_args *)); 124static int nfs_strategy __P((struct vop_strategy_args *)); 125static int nfs_lookitup __P((struct vnode *, const char *, int, 126 struct ucred *, struct proc *, struct nfsnode **)); 127static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *)); 128static int 
nfsspec_access __P((struct vop_access_args *)); 129static int nfs_readlink __P((struct vop_readlink_args *)); 130static int nfs_print __P((struct vop_print_args *)); 131static int nfs_advlock __P((struct vop_advlock_args *)); 132static int nfs_bwrite __P((struct vop_bwrite_args *)); 133/* 134 * Global vfs data structures for nfs 135 */ 136vop_t **nfsv2_vnodeop_p; 137static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = { 138 { &vop_default_desc, (vop_t *) vop_defaultop }, 139 { &vop_abortop_desc, (vop_t *) nfs_abortop }, 140 { &vop_access_desc, (vop_t *) nfs_access }, 141 { &vop_advlock_desc, (vop_t *) nfs_advlock }, 142 { &vop_bmap_desc, (vop_t *) nfs_bmap }, 143 { &vop_bwrite_desc, (vop_t *) nfs_bwrite }, 144 { &vop_close_desc, (vop_t *) nfs_close }, 145 { &vop_create_desc, (vop_t *) nfs_create }, 146 { &vop_fsync_desc, (vop_t *) nfs_fsync }, 147 { &vop_getattr_desc, (vop_t *) nfs_getattr }, 148 { &vop_getpages_desc, (vop_t *) nfs_getpages }, 149 { &vop_putpages_desc, (vop_t *) nfs_putpages }, 150 { &vop_inactive_desc, (vop_t *) nfs_inactive }, 151 { &vop_lease_desc, (vop_t *) vop_null }, 152 { &vop_link_desc, (vop_t *) nfs_link }, 153 { &vop_lock_desc, (vop_t *) vop_sharedlock }, 154 { &vop_lookup_desc, (vop_t *) nfs_lookup }, 155 { &vop_mkdir_desc, (vop_t *) nfs_mkdir }, 156 { &vop_mknod_desc, (vop_t *) nfs_mknod }, 157 { &vop_mmap_desc, (vop_t *) nfs_mmap }, 158 { &vop_open_desc, (vop_t *) nfs_open }, 159 { &vop_poll_desc, (vop_t *) nfs_poll }, 160 { &vop_print_desc, (vop_t *) nfs_print }, 161 { &vop_read_desc, (vop_t *) nfs_read }, 162 { &vop_readdir_desc, (vop_t *) nfs_readdir }, 163 { &vop_readlink_desc, (vop_t *) nfs_readlink }, 164 { &vop_reclaim_desc, (vop_t *) nfs_reclaim }, 165 { &vop_remove_desc, (vop_t *) nfs_remove }, 166 { &vop_rename_desc, (vop_t *) nfs_rename }, 167 { &vop_rmdir_desc, (vop_t *) nfs_rmdir }, 168 { &vop_setattr_desc, (vop_t *) nfs_setattr }, 169 { &vop_strategy_desc, (vop_t *) nfs_strategy }, 170 { &vop_symlink_desc, (vop_t 
*) nfs_symlink }, 171 { &vop_write_desc, (vop_t *) nfs_write }, 172 { NULL, NULL } 173}; 174static struct vnodeopv_desc nfsv2_vnodeop_opv_desc = 175 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries }; 176VNODEOP_SET(nfsv2_vnodeop_opv_desc); 177 178/* 179 * Special device vnode ops 180 */ 181vop_t **spec_nfsv2nodeop_p; 182static struct vnodeopv_entry_desc nfsv2_specop_entries[] = { 183 { &vop_default_desc, (vop_t *) spec_vnoperate }, 184 { &vop_access_desc, (vop_t *) nfsspec_access }, 185 { &vop_close_desc, (vop_t *) nfsspec_close }, 186 { &vop_fsync_desc, (vop_t *) nfs_fsync }, 187 { &vop_getattr_desc, (vop_t *) nfs_getattr }, 188 { &vop_inactive_desc, (vop_t *) nfs_inactive }, 189 { &vop_lock_desc, (vop_t *) vop_sharedlock }, 190 { &vop_print_desc, (vop_t *) nfs_print }, 191 { &vop_read_desc, (vop_t *) nfsspec_read }, 192 { &vop_reclaim_desc, (vop_t *) nfs_reclaim }, 193 { &vop_setattr_desc, (vop_t *) nfs_setattr }, 194 { &vop_write_desc, (vop_t *) nfsspec_write }, 195 { NULL, NULL } 196}; 197static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc = 198 { &spec_nfsv2nodeop_p, nfsv2_specop_entries }; 199VNODEOP_SET(spec_nfsv2nodeop_opv_desc); 200 201vop_t **fifo_nfsv2nodeop_p; 202static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = { 203 { &vop_default_desc, (vop_t *) fifo_vnoperate }, 204 { &vop_access_desc, (vop_t *) nfsspec_access }, 205 { &vop_close_desc, (vop_t *) nfsfifo_close }, 206 { &vop_fsync_desc, (vop_t *) nfs_fsync }, 207 { &vop_getattr_desc, (vop_t *) nfs_getattr }, 208 { &vop_inactive_desc, (vop_t *) nfs_inactive }, 209 { &vop_lock_desc, (vop_t *) vop_sharedlock }, 210 { &vop_print_desc, (vop_t *) nfs_print }, 211 { &vop_read_desc, (vop_t *) nfsfifo_read }, 212 { &vop_reclaim_desc, (vop_t *) nfs_reclaim }, 213 { &vop_setattr_desc, (vop_t *) nfs_setattr }, 214 { &vop_write_desc, (vop_t *) nfsfifo_write }, 215 { NULL, NULL } 216}; 217static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc = 218 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries }; 
219VNODEOP_SET(fifo_nfsv2nodeop_opv_desc); 220 221static int nfs_commit __P((struct vnode *vp, u_quad_t offset, int cnt, 222 struct ucred *cred, struct proc *procp)); 223static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp, 224 struct componentname *cnp, 225 struct vattr *vap)); 226static int nfs_removerpc __P((struct vnode *dvp, const char *name, 227 int namelen, 228 struct ucred *cred, struct proc *proc)); 229static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr, 230 int fnamelen, struct vnode *tdvp, 231 const char *tnameptr, int tnamelen, 232 struct ucred *cred, struct proc *proc)); 233static int nfs_renameit __P((struct vnode *sdvp, 234 struct componentname *scnp, 235 struct sillyrename *sp)); 236 237/* 238 * Global variables 239 */ 240extern u_long nfs_true, nfs_false; 241extern struct nfsstats nfsstats; 242extern nfstype nfsv3_type[9]; 243struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON]; 244struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON]; 245int nfs_numasync = 0; 246#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1)) 247 248/* 249 * nfs access vnode op. 250 * For nfs version 2, just return ok. File accesses may fail later. 251 * For nfs version 3, use the access rpc to check accessibility. If file modes 252 * are changed on the server, accesses might still fail later. 253 */ 254static int 255nfs_access(ap) 256 struct vop_access_args /* { 257 struct vnode *a_vp; 258 int a_mode; 259 struct ucred *a_cred; 260 struct proc *a_p; 261 } */ *ap; 262{ 263 register struct vnode *vp = ap->a_vp; 264 register u_long *tl; 265 register caddr_t cp; 266 register int t1, t2; 267 caddr_t bpos, dpos, cp2; 268 int error = 0, attrflag; 269 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 270 u_long mode, rmode; 271 int v3 = NFS_ISV3(vp); 272 273 /* 274 * Disallow write attempts on filesystems mounted read-only; 275 * unless the file is a socket, fifo, or a block or character 276 * device resident on the filesystem. 
277 */ 278 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 279 switch (vp->v_type) { 280 case VREG: 281 case VDIR: 282 case VLNK: 283 return (EROFS); 284 default: 285 break; 286 } 287 } 288 /* 289 * For nfs v3, do an access rpc, otherwise you are stuck emulating 290 * ufs_access() locally using the vattr. This may not be correct, 291 * since the server may apply other access criteria such as 292 * client uid-->server uid mapping that we do not know about, but 293 * this is better than just returning anything that is lying about 294 * in the cache. 295 */ 296 if (v3) { 297 nfsstats.rpccnt[NFSPROC_ACCESS]++; 298 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED); 299 nfsm_fhtom(vp, v3); 300 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 301 if (ap->a_mode & VREAD) 302 mode = NFSV3ACCESS_READ; 303 else 304 mode = 0; 305 if (vp->v_type != VDIR) { 306 if (ap->a_mode & VWRITE) 307 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 308 if (ap->a_mode & VEXEC) 309 mode |= NFSV3ACCESS_EXECUTE; 310 } else { 311 if (ap->a_mode & VWRITE) 312 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 313 NFSV3ACCESS_DELETE); 314 if (ap->a_mode & VEXEC) 315 mode |= NFSV3ACCESS_LOOKUP; 316 } 317 *tl = txdr_unsigned(mode); 318 nfsm_request(vp, NFSPROC_ACCESS, ap->a_p, ap->a_cred); 319 nfsm_postop_attr(vp, attrflag); 320 if (!error) { 321 nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); 322 rmode = fxdr_unsigned(u_long, *tl); 323 /* 324 * The NFS V3 spec does not clarify whether or not 325 * the returned access bits can be a superset of 326 * the ones requested, so... 327 */ 328 if ((rmode & mode) != mode) 329 error = EACCES; 330 } 331 nfsm_reqdone; 332 return (error); 333 } else { 334 if (error = nfsspec_access(ap)) 335 return (error); 336 337 /* 338 * Attempt to prevent a mapped root from accessing a file 339 * which it shouldn't. We try to read a byte from the file 340 * if the user is root and the file is not zero length. 
341 * After calling nfsspec_access, we should have the correct 342 * file size cached. 343 */ 344 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD) 345 && VTONFS(vp)->n_size > 0) { 346 struct iovec aiov; 347 struct uio auio; 348 char buf[1]; 349 350 aiov.iov_base = buf; 351 aiov.iov_len = 1; 352 auio.uio_iov = &aiov; 353 auio.uio_iovcnt = 1; 354 auio.uio_offset = 0; 355 auio.uio_resid = 1; 356 auio.uio_segflg = UIO_SYSSPACE; 357 auio.uio_rw = UIO_READ; 358 auio.uio_procp = ap->a_p; 359 360 if (vp->v_type == VREG) 361 error = nfs_readrpc(vp, &auio, ap->a_cred); 362 else if (vp->v_type == VDIR) { 363 char* bp; 364 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK); 365 aiov.iov_base = bp; 366 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ; 367 error = nfs_readdirrpc(vp, &auio, ap->a_cred); 368 free(bp, M_TEMP); 369 } else if (vp->v_type = VLNK) 370 error = nfs_readlinkrpc(vp, &auio, ap->a_cred); 371 else 372 error = EACCES; 373 } 374 return (error); 375 } 376} 377 378/* 379 * nfs open vnode op 380 * Check to see if the type is ok 381 * and that deletion is not in progress. 382 * For paged in text files, you will need to flush the page cache 383 * if consistency is lost. 384 */ 385/* ARGSUSED */ 386static int 387nfs_open(ap) 388 struct vop_open_args /* { 389 struct vnode *a_vp; 390 int a_mode; 391 struct ucred *a_cred; 392 struct proc *a_p; 393 } */ *ap; 394{ 395 register struct vnode *vp = ap->a_vp; 396 struct nfsnode *np = VTONFS(vp); 397 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 398 struct vattr vattr; 399 int error; 400 401 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 402#ifdef DIAGNOSTIC 403 printf("open eacces vtyp=%d\n",vp->v_type); 404#endif 405 return (EACCES); 406 } 407 /* 408 * Get a valid lease. If cached data is stale, flush it. 
409 */ 410 if (nmp->nm_flag & NFSMNT_NQNFS) { 411 if (NQNFS_CKINVALID(vp, np, ND_READ)) { 412 do { 413 error = nqnfs_getlease(vp, ND_READ, ap->a_cred, 414 ap->a_p); 415 } while (error == NQNFS_EXPIRED); 416 if (error) 417 return (error); 418 if (np->n_lrev != np->n_brev || 419 (np->n_flag & NQNFSNONCACHE)) { 420 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 421 ap->a_p, 1)) == EINTR) 422 return (error); 423 np->n_brev = np->n_lrev; 424 } 425 } 426 } else { 427 if (np->n_flag & NMODIFIED) { 428 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 429 ap->a_p, 1)) == EINTR) 430 return (error); 431 np->n_attrstamp = 0; 432 if (vp->v_type == VDIR) 433 np->n_direofoffset = 0; 434 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 435 if (error) 436 return (error); 437 np->n_mtime = vattr.va_mtime.tv_sec; 438 } else { 439 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 440 if (error) 441 return (error); 442 if (np->n_mtime != vattr.va_mtime.tv_sec) { 443 if (vp->v_type == VDIR) 444 np->n_direofoffset = 0; 445 if ((error = nfs_vinvalbuf(vp, V_SAVE, 446 ap->a_cred, ap->a_p, 1)) == EINTR) 447 return (error); 448 np->n_mtime = vattr.va_mtime.tv_sec; 449 } 450 } 451 } 452 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) 453 np->n_attrstamp = 0; /* For Open/Close consistency */ 454 return (0); 455} 456 457/* 458 * nfs close vnode op 459 * What an NFS client should do upon close after writing is a debatable issue. 460 * Most NFS clients push delayed writes to the server upon close, basically for 461 * two reasons: 462 * 1 - So that any write errors may be reported back to the client process 463 * doing the close system call. By far the two most likely errors are 464 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 465 * 2 - To put a worst case upper bound on cache inconsistency between 466 * multiple clients for the file. 467 * There is also a consistency problem for Version 2 of the protocol w.r.t. 
468 * not being able to tell if other clients are writing a file concurrently, 469 * since there is no way of knowing if the changed modify time in the reply 470 * is only due to the write for this client. 471 * (NFS Version 3 provides weak cache consistency data in the reply that 472 * should be sufficient to detect and handle this case.) 473 * 474 * The current code does the following: 475 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 476 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 477 * or commit them (this satisfies 1 and 2 except for the 478 * case where the server crashes after this close but 479 * before the commit RPC, which is felt to be "good 480 * enough". Changing the last argument to nfs_flush() to 481 * a 1 would force a commit operation, if it is felt a 482 * commit is necessary now. 483 * for NQNFS - do nothing now, since 2 is dealt with via leases and 484 * 1 should be dealt with via an fsync() system call for 485 * cases where write errors are important. 486 */ 487/* ARGSUSED */ 488static int 489nfs_close(ap) 490 struct vop_close_args /* { 491 struct vnodeop_desc *a_desc; 492 struct vnode *a_vp; 493 int a_fflag; 494 struct ucred *a_cred; 495 struct proc *a_p; 496 } */ *ap; 497{ 498 register struct vnode *vp = ap->a_vp; 499 register struct nfsnode *np = VTONFS(vp); 500 int error = 0; 501 502 if (vp->v_type == VREG) { 503 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 && 504 (np->n_flag & NMODIFIED)) { 505 if (NFS_ISV3(vp)) { 506 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0); 507 np->n_flag &= ~NMODIFIED; 508 } else 509 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1); 510 np->n_attrstamp = 0; 511 } 512 if (np->n_flag & NWRITEERR) { 513 np->n_flag &= ~NWRITEERR; 514 error = np->n_error; 515 } 516 } 517 return (error); 518} 519 520/* 521 * nfs getattr call from vfs. 
522 */ 523static int 524nfs_getattr(ap) 525 struct vop_getattr_args /* { 526 struct vnode *a_vp; 527 struct vattr *a_vap; 528 struct ucred *a_cred; 529 struct proc *a_p; 530 } */ *ap; 531{ 532 register struct vnode *vp = ap->a_vp; 533 register struct nfsnode *np = VTONFS(vp); 534 register caddr_t cp; 535 register u_long *tl; 536 register int t1, t2; 537 caddr_t bpos, dpos; 538 int error = 0; 539 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 540 int v3 = NFS_ISV3(vp); 541 542 /* 543 * Update local times for special files. 544 */ 545 if (np->n_flag & (NACC | NUPD)) 546 np->n_flag |= NCHG; 547 /* 548 * First look in the cache. 549 */ 550 if (nfs_getattrcache(vp, ap->a_vap) == 0) 551 return (0); 552 nfsstats.rpccnt[NFSPROC_GETATTR]++; 553 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3)); 554 nfsm_fhtom(vp, v3); 555 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred); 556 if (!error) { 557 nfsm_loadattr(vp, ap->a_vap); 558 } 559 nfsm_reqdone; 560 return (error); 561} 562 563/* 564 * nfs setattr call. 565 */ 566static int 567nfs_setattr(ap) 568 struct vop_setattr_args /* { 569 struct vnodeop_desc *a_desc; 570 struct vnode *a_vp; 571 struct vattr *a_vap; 572 struct ucred *a_cred; 573 struct proc *a_p; 574 } */ *ap; 575{ 576 register struct vnode *vp = ap->a_vp; 577 register struct nfsnode *np = VTONFS(vp); 578 register struct vattr *vap = ap->a_vap; 579 int error = 0; 580 u_quad_t tsize; 581 582#ifndef nolint 583 tsize = (u_quad_t)0; 584#endif 585 /* 586 * Disallow write attempts if the filesystem is mounted read-only. 
587 */ 588 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || 589 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 590 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 591 (vp->v_mount->mnt_flag & MNT_RDONLY)) 592 return (EROFS); 593 if (vap->va_size != VNOVAL) { 594 switch (vp->v_type) { 595 case VDIR: 596 return (EISDIR); 597 case VCHR: 598 case VBLK: 599 case VSOCK: 600 case VFIFO: 601 if (vap->va_mtime.tv_sec == VNOVAL && 602 vap->va_atime.tv_sec == VNOVAL && 603 vap->va_mode == (u_short)VNOVAL && 604 vap->va_uid == (uid_t)VNOVAL && 605 vap->va_gid == (gid_t)VNOVAL) 606 return (0); 607 vap->va_size = VNOVAL; 608 break; 609 default: 610 /* 611 * Disallow write attempts if the filesystem is 612 * mounted read-only. 613 */ 614 if (vp->v_mount->mnt_flag & MNT_RDONLY) 615 return (EROFS); 616 if (np->n_flag & NMODIFIED) { 617 if (vap->va_size == 0) 618 error = nfs_vinvalbuf(vp, 0, 619 ap->a_cred, ap->a_p, 1); 620 else 621 error = nfs_vinvalbuf(vp, V_SAVE, 622 ap->a_cred, ap->a_p, 1); 623 if (error) 624 return (error); 625 } 626 tsize = np->n_size; 627 np->n_size = np->n_vattr.va_size = vap->va_size; 628 vnode_pager_setsize(vp, (u_long)np->n_size); 629 }; 630 } else if ((vap->va_mtime.tv_sec != VNOVAL || 631 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) && 632 vp->v_type == VREG && 633 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 634 ap->a_p, 1)) == EINTR) 635 return (error); 636 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); 637 if (error && vap->va_size != VNOVAL) { 638 np->n_size = np->n_vattr.va_size = tsize; 639 vnode_pager_setsize(vp, (u_long)np->n_size); 640 } 641 return (error); 642} 643 644/* 645 * Do an nfs setattr rpc. 
646 */ 647static int 648nfs_setattrrpc(vp, vap, cred, procp) 649 register struct vnode *vp; 650 register struct vattr *vap; 651 struct ucred *cred; 652 struct proc *procp; 653{ 654 register struct nfsv2_sattr *sp; 655 register caddr_t cp; 656 register long t1, t2; 657 caddr_t bpos, dpos, cp2; 658 u_long *tl; 659 int error = 0, wccflag = NFSV3_WCCRATTR; 660 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 661 int v3 = NFS_ISV3(vp); 662 663 nfsstats.rpccnt[NFSPROC_SETATTR]++; 664 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3)); 665 nfsm_fhtom(vp, v3); 666 if (v3) { 667 if (vap->va_mode != (u_short)VNOVAL) { 668 nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); 669 *tl++ = nfs_true; 670 *tl = txdr_unsigned(vap->va_mode); 671 } else { 672 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 673 *tl = nfs_false; 674 } 675 if (vap->va_uid != (uid_t)VNOVAL) { 676 nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); 677 *tl++ = nfs_true; 678 *tl = txdr_unsigned(vap->va_uid); 679 } else { 680 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 681 *tl = nfs_false; 682 } 683 if (vap->va_gid != (gid_t)VNOVAL) { 684 nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); 685 *tl++ = nfs_true; 686 *tl = txdr_unsigned(vap->va_gid); 687 } else { 688 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 689 *tl = nfs_false; 690 } 691 if (vap->va_size != VNOVAL) { 692 nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); 693 *tl++ = nfs_true; 694 txdr_hyper(&vap->va_size, tl); 695 } else { 696 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 697 *tl = nfs_false; 698 } 699 if (vap->va_atime.tv_sec != VNOVAL) { 700 if (vap->va_atime.tv_sec != time_second) { 701 nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); 702 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); 703 txdr_nfsv3time(&vap->va_atime, tl); 704 } else { 705 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 706 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); 707 } 708 } else { 709 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 710 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); 711 } 712 if (vap->va_mtime.tv_sec 
!= VNOVAL) { 713 if (vap->va_mtime.tv_sec != time_second) { 714 nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); 715 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); 716 txdr_nfsv3time(&vap->va_mtime, tl); 717 } else { 718 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 719 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); 720 } 721 } else { 722 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 723 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); 724 } 725 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 726 *tl = nfs_false; 727 } else { 728 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 729 if (vap->va_mode == (u_short)VNOVAL) 730 sp->sa_mode = VNOVAL; 731 else 732 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 733 if (vap->va_uid == (uid_t)VNOVAL) 734 sp->sa_uid = VNOVAL; 735 else 736 sp->sa_uid = txdr_unsigned(vap->va_uid); 737 if (vap->va_gid == (gid_t)VNOVAL) 738 sp->sa_gid = VNOVAL; 739 else 740 sp->sa_gid = txdr_unsigned(vap->va_gid); 741 sp->sa_size = txdr_unsigned(vap->va_size); 742 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 743 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 744 } 745 nfsm_request(vp, NFSPROC_SETATTR, procp, cred); 746 if (v3) { 747 nfsm_wcc_data(vp, wccflag); 748 } else 749 nfsm_loadattr(vp, (struct vattr *)0); 750 nfsm_reqdone; 751 return (error); 752} 753 754/* 755 * nfs lookup call, one step at a time... 
756 * First look in cache 757 * If not found, unlock the directory nfsnode and do the rpc 758 */ 759static int 760nfs_lookup(ap) 761 struct vop_lookup_args /* { 762 struct vnodeop_desc *a_desc; 763 struct vnode *a_dvp; 764 struct vnode **a_vpp; 765 struct componentname *a_cnp; 766 } */ *ap; 767{ 768 struct componentname *cnp = ap->a_cnp; 769 struct vnode *dvp = ap->a_dvp; 770 struct vnode **vpp = ap->a_vpp; 771 int flags = cnp->cn_flags; 772 struct vnode *newvp; 773 u_long *tl; 774 caddr_t cp; 775 long t1, t2; 776 struct nfsmount *nmp; 777 caddr_t bpos, dpos, cp2; 778 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 779 long len; 780 nfsfh_t *fhp; 781 struct nfsnode *np; 782 int lockparent, wantparent, error = 0, attrflag, fhsize; 783 int v3 = NFS_ISV3(dvp); 784 struct proc *p = cnp->cn_proc; 785 786 *vpp = NULLVP; 787 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 788 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 789 return (EROFS); 790 if (dvp->v_type != VDIR) 791 return (ENOTDIR); 792 lockparent = flags & LOCKPARENT; 793 wantparent = flags & (LOCKPARENT|WANTPARENT); 794 nmp = VFSTONFS(dvp->v_mount); 795 np = VTONFS(dvp); 796 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) { 797 struct vattr vattr; 798 int vpid; 799 800 if (error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) { 801 *vpp = NULLVP; 802 return (error); 803 } 804 805 newvp = *vpp; 806 vpid = newvp->v_id; 807 /* 808 * See the comment starting `Step through' in ufs/ufs_lookup.c 809 * for an explanation of the locking protocol 810 */ 811 if (dvp == newvp) { 812 VREF(newvp); 813 error = 0; 814 } else if (flags & ISDOTDOT) { 815 VOP_UNLOCK(dvp, 0, p); 816 error = vget(newvp, LK_EXCLUSIVE, p); 817 if (!error && lockparent && (flags & ISLASTCN)) 818 error = vn_lock(dvp, LK_EXCLUSIVE, p); 819 } else { 820 error = vget(newvp, LK_EXCLUSIVE, p); 821 if (!lockparent || error || !(flags & ISLASTCN)) 822 VOP_UNLOCK(dvp, 0, p); 823 } 824 if (!error) { 825 if (vpid == newvp->v_id) 
{ 826 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p) 827 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) { 828 nfsstats.lookupcache_hits++; 829 if (cnp->cn_nameiop != LOOKUP && 830 (flags & ISLASTCN)) 831 cnp->cn_flags |= SAVENAME; 832 return (0); 833 } 834 cache_purge(newvp); 835 } 836 vput(newvp); 837 if (lockparent && dvp != newvp && (flags & ISLASTCN)) 838 VOP_UNLOCK(dvp, 0, p); 839 } 840 error = vn_lock(dvp, LK_EXCLUSIVE, p); 841 *vpp = NULLVP; 842 if (error) 843 return (error); 844 } 845 error = 0; 846 newvp = NULLVP; 847 nfsstats.lookupcache_misses++; 848 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 849 len = cnp->cn_namelen; 850 nfsm_reqhead(dvp, NFSPROC_LOOKUP, 851 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 852 nfsm_fhtom(dvp, v3); 853 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 854 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred); 855 if (error) { 856 nfsm_postop_attr(dvp, attrflag); 857 m_freem(mrep); 858 goto nfsmout; 859 } 860 nfsm_getfh(fhp, fhsize, v3); 861 862 /* 863 * Handle RENAME case... 
864 */ 865 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) { 866 if (NFS_CMPFH(np, fhp, fhsize)) { 867 m_freem(mrep); 868 return (EISDIR); 869 } 870 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 871 if (error) { 872 m_freem(mrep); 873 return (error); 874 } 875 newvp = NFSTOV(np); 876 if (v3) { 877 nfsm_postop_attr(newvp, attrflag); 878 nfsm_postop_attr(dvp, attrflag); 879 } else 880 nfsm_loadattr(newvp, (struct vattr *)0); 881 *vpp = newvp; 882 m_freem(mrep); 883 cnp->cn_flags |= SAVENAME; 884 if (!lockparent) 885 VOP_UNLOCK(dvp, 0, p); 886 return (0); 887 } 888 889 if (flags & ISDOTDOT) { 890 VOP_UNLOCK(dvp, 0, p); 891 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 892 if (error) { 893 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p); 894 return (error); 895 } 896 newvp = NFSTOV(np); 897 if (lockparent && (flags & ISLASTCN) && 898 (error = vn_lock(dvp, LK_EXCLUSIVE, p))) { 899 vput(newvp); 900 return (error); 901 } 902 } else if (NFS_CMPFH(np, fhp, fhsize)) { 903 VREF(dvp); 904 newvp = dvp; 905 } else { 906 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 907 if (error) { 908 m_freem(mrep); 909 return (error); 910 } 911 if (!lockparent || !(flags & ISLASTCN)) 912 VOP_UNLOCK(dvp, 0, p); 913 newvp = NFSTOV(np); 914 } 915 if (v3) { 916 nfsm_postop_attr(newvp, attrflag); 917 nfsm_postop_attr(dvp, attrflag); 918 } else 919 nfsm_loadattr(newvp, (struct vattr *)0); 920 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 921 cnp->cn_flags |= SAVENAME; 922 if ((cnp->cn_flags & MAKEENTRY) && 923 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) { 924 np->n_ctime = np->n_vattr.va_ctime.tv_sec; 925 cache_enter(dvp, newvp, cnp); 926 } 927 *vpp = newvp; 928 nfsm_reqdone; 929 if (error) { 930 if (newvp != NULLVP) { 931 vrele(newvp); 932 *vpp = NULLVP; 933 } 934 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 935 (flags & ISLASTCN) && error == ENOENT) { 936 if (!lockparent) 937 VOP_UNLOCK(dvp, 0, p); 938 if (dvp->v_mount->mnt_flag & 
MNT_RDONLY) 939 error = EROFS; 940 else 941 error = EJUSTRETURN; 942 } 943 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 944 cnp->cn_flags |= SAVENAME; 945 } 946 return (error); 947} 948 949/* 950 * nfs read call. 951 * Just call nfs_bioread() to do the work. 952 */ 953static int 954nfs_read(ap) 955 struct vop_read_args /* { 956 struct vnode *a_vp; 957 struct uio *a_uio; 958 int a_ioflag; 959 struct ucred *a_cred; 960 } */ *ap; 961{ 962 register struct vnode *vp = ap->a_vp; 963 964 if (vp->v_type != VREG) 965 return (EPERM); 966 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0)); 967} 968 969/* 970 * nfs readlink call 971 */ 972static int 973nfs_readlink(ap) 974 struct vop_readlink_args /* { 975 struct vnode *a_vp; 976 struct uio *a_uio; 977 struct ucred *a_cred; 978 } */ *ap; 979{ 980 register struct vnode *vp = ap->a_vp; 981 982 if (vp->v_type != VLNK) 983 return (EPERM); 984 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0)); 985} 986 987/* 988 * Do a readlink rpc. 989 * Called by nfs_doio() from below the buffer cache. 
990 */ 991int 992nfs_readlinkrpc(vp, uiop, cred) 993 register struct vnode *vp; 994 struct uio *uiop; 995 struct ucred *cred; 996{ 997 register u_long *tl; 998 register caddr_t cp; 999 register long t1, t2; 1000 caddr_t bpos, dpos, cp2; 1001 int error = 0, len, attrflag; 1002 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1003 int v3 = NFS_ISV3(vp); 1004 1005 nfsstats.rpccnt[NFSPROC_READLINK]++; 1006 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3)); 1007 nfsm_fhtom(vp, v3); 1008 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred); 1009 if (v3) 1010 nfsm_postop_attr(vp, attrflag); 1011 if (!error) { 1012 nfsm_strsiz(len, NFS_MAXPATHLEN); 1013 nfsm_mtouio(uiop, len); 1014 } 1015 nfsm_reqdone; 1016 return (error); 1017} 1018 1019/* 1020 * nfs read rpc call 1021 * Ditto above 1022 */ 1023int 1024nfs_readrpc(vp, uiop, cred) 1025 register struct vnode *vp; 1026 struct uio *uiop; 1027 struct ucred *cred; 1028{ 1029 register u_long *tl; 1030 register caddr_t cp; 1031 register long t1, t2; 1032 caddr_t bpos, dpos, cp2; 1033 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1034 struct nfsmount *nmp; 1035 int error = 0, len, retlen, tsiz, eof, attrflag; 1036 int v3 = NFS_ISV3(vp); 1037 1038#ifndef nolint 1039 eof = 0; 1040#endif 1041 nmp = VFSTONFS(vp->v_mount); 1042 tsiz = uiop->uio_resid; 1043 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) 1044 return (EFBIG); 1045 while (tsiz > 0) { 1046 nfsstats.rpccnt[NFSPROC_READ]++; 1047 len = (tsiz > nmp->nm_rsize) ? 
nmp->nm_rsize : tsiz;
		nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
		nfsm_fhtom(vp, v3);
		nfsm_build(tl, u_long *, NFSX_UNSIGNED * 3);
		if (v3) {
			/* V3 args: 64 bit offset (2 words) + 32 bit count. */
			txdr_hyper(&uiop->uio_offset, tl);
			*(tl + 2) = txdr_unsigned(len);
		} else {
			/* V2 args: offset, count, and an unused third word. */
			*tl++ = txdr_unsigned(uiop->uio_offset);
			*tl++ = txdr_unsigned(len);
			*tl = 0;
		}
		nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred);
		if (v3) {
			nfsm_postop_attr(vp, attrflag);
			if (error) {
				m_freem(mrep);
				goto nfsmout;
			}
			nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
			eof = fxdr_unsigned(int, *(tl + 1));
		} else
			nfsm_loadattr(vp, (struct vattr *)0);
		nfsm_strsiz(retlen, nmp->nm_rsize);
		nfsm_mtouio(uiop, retlen);
		m_freem(mrep);
		tsiz -= retlen;
		/* Stop at server-reported EOF (V3) or on a short read (V2). */
		if (v3) {
			if (eof || retlen == 0)
				tsiz = 0;
		} else if (retlen < len)
			tsiz = 0;
	}
nfsmout:
	return (error);
}

/*
 * nfs write call
 */
int
nfs_writerpc(vp, uiop, cred, iomode, must_commit)
	register struct vnode *vp;
	register struct uio *uiop;
	struct ucred *cred;
	int *iomode, *must_commit;
{
	register u_long *tl;
	register caddr_t cp;
	register int t1, t2, backup;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
	int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;

#ifndef DIAGNOSTIC
	/* NOTE(review): this sanity check is compiled OUT when DIAGNOSTIC is
	 * defined; #ifdef DIAGNOSTIC looks like the intent -- confirm. */
	if (uiop->uio_iovcnt != 1)
		panic("nfs: writerpc iovcnt > 1");
#endif
	*must_commit = 0;
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return (EFBIG);
	/* Split the transfer into RPCs of at most nm_wsize bytes each. */
	while (tsiz > 0) {
		nfsstats.rpccnt[NFSPROC_WRITE]++;
		len = (tsiz > nmp->nm_wsize) ?
nmp->nm_wsize : tsiz;
		nfsm_reqhead(vp, NFSPROC_WRITE,
			NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
		nfsm_fhtom(vp, v3);
		if (v3) {
			/* V3 args: offset (2 words), count, stable-how ... */
			nfsm_build(tl, u_long *, 5 * NFSX_UNSIGNED);
			txdr_hyper(&uiop->uio_offset, tl);
			tl += 2;
			*tl++ = txdr_unsigned(len);
			*tl++ = txdr_unsigned(*iomode);
		} else {
			/* V2 args: beginoffset, offset, totalcount; only the
			 * offset word is filled in here -- presumably servers
			 * ignore the other two (confirm against RFC 1094). */
			nfsm_build(tl, u_long *, 4 * NFSX_UNSIGNED);
			*++tl = txdr_unsigned(uiop->uio_offset);
			tl += 2;
		}
		/* ... followed in both variants by the data string length. */
		*tl = txdr_unsigned(len);
		nfsm_uiotom(uiop, len);
		nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred);
		if (v3) {
			wccflag = NFSV3_WCCCHK;
			nfsm_wcc_data(vp, wccflag);
			if (!error) {
				nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED +
					NFSX_V3WRITEVERF);
				rlen = fxdr_unsigned(int, *tl++);
				if (rlen == 0) {
					error = NFSERR_IO;
					break;
				} else if (rlen < len) {
					/* Short write: back the uio up so the
					 * unwritten tail gets sent again. */
					backup = len - rlen;
					uiop->uio_iov->iov_base -= backup;
					uiop->uio_iov->iov_len += backup;
					uiop->uio_offset -= backup;
					uiop->uio_resid += backup;
					len = rlen;
				}
				commit = fxdr_unsigned(int, *tl++);

				/*
				 * Return the lowest committment level
				 * obtained by any of the RPCs.
				 */
				if (committed == NFSV3WRITE_FILESYNC)
					committed = commit;
				else if (committed == NFSV3WRITE_DATASYNC &&
					commit == NFSV3WRITE_UNSTABLE)
					committed = commit;
				/* Record the server's write verifier; if it
				 * changes, the server rebooted and the caller
				 * must re-commit unstable writes. */
				if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
				    bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
					NFSX_V3WRITEVERF);
				    nmp->nm_state |= NFSSTA_HASWRITEVERF;
				} else if (bcmp((caddr_t)tl,
				    (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
					*must_commit = 1;
					bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
						NFSX_V3WRITEVERF);
				}
			}
		} else
			nfsm_loadattr(vp, (struct vattr *)0);
		if (wccflag)
			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
		m_freem(mrep);
		tsiz -= len;
	}
nfsmout:
	/* Async mounts report full commitment; presumably to suppress later
	 * commit RPCs -- NOTE(review): confirm this is intentional. */
	if (vp->v_mount->mnt_flag & MNT_ASYNC)
		committed = NFSV3WRITE_FILESYNC;
	*iomode = committed;
	if (error)
		uiop->uio_resid = tsiz;
	return (error);
}

/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
 */
static int
nfs_mknodrpc(dvp, vpp, cnp, vap)
	register struct vnode *dvp;
	register struct vnode **vpp;
	register struct componentname *cnp;
	register struct vattr *vap;
{
	register struct nfsv2_sattr *sp;
	register struct nfsv3_sattr *sp3;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	struct vnode *newvp = (struct vnode *)0;
	struct nfsnode *np = (struct nfsnode *)0;
	struct vattr vattr;
	char *cp2;
	caddr_t bpos, dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	u_long rdev;
	int v3 = NFS_ISV3(dvp);

	/* Only devices, fifos and sockets can be made here. */
	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = txdr_unsigned(vap->va_rdev);
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = 0xffffffff;
	else {
		VOP_ABORTOP(dvp, cnp);
		return (EOPNOTSUPP);
	}
	/* The directory's attributes supply the new node's group id. */
	if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) {
		VOP_ABORTOP(dvp, cnp);
		return (error);
	}
	nfsstats.rpccnt[NFSPROC_MKNOD]++;
	nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
		+ nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	if (v3) {
		nfsm_build(tl, u_long *, NFSX_UNSIGNED + NFSX_V3SRVSATTR);
		*tl++ = vtonfsv3_type(vap->va_type);
		sp3 = (struct nfsv3_sattr *)tl;
		nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid);
		/* Devices additionally carry major/minor numbers. */
		if (vap->va_type == VCHR || vap->va_type == VBLK) {
			nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(major(vap->va_rdev));
			*tl = txdr_unsigned(minor(vap->va_rdev));
		}
	} else {
		/* V2 kludge: file type goes in the mode, rdev in sa_size. */
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid);
		sp->sa_gid = txdr_unsigned(vattr.va_gid);
		sp->sa_size = rdev;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			if (newvp) {
				vput(newvp);
				newvp = (struct vnode *)0;
			}
			/* No handle returned; look the new node up by name. */
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	if (error) {
		if (newvp)
			vput(newvp);
	} else {
		if (cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, newvp, cnp);
		*vpp = newvp;
	}
	zfree(namei_zone, cnp->cn_pnbuf);
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
static int
nfs_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct vnode *newvp;
	int error;

	/* VOP_MKNOD() callers do not take the new vnode; release it. */
	error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap);
	if (!error)
		vput(newvp);
	return (error);
}

/* Counter feeding the V3 exclusive-create verifier. */
static u_long create_verf;
/*
 * nfs file create call
 */
static int
nfs_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	register struct vnode *dvp = ap->a_dvp;
	register struct vattr *vap = ap->a_vap;
	register struct componentname *cnp = ap->a_cnp;
	register struct nfsv2_sattr *sp;
	register struct nfsv3_sattr *sp3;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	struct nfsnode *np = (struct
nfsnode *)0;
	struct vnode *newvp = (struct vnode *)0;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct vattr vattr;
	int v3 = NFS_ISV3(dvp);

	/*
	 * Oops, not for me.. sockets are created via the mknod path.
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	/* The directory's attributes supply the new file's group id. */
	if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) {
		VOP_ABORTOP(dvp, cnp);
		return (error);
	}
	if (vap->va_vaflags & VA_EXCLUSIVE)
		fmode |= O_EXCL;
again:
	nfsstats.rpccnt[NFSPROC_CREATE]++;
	nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
		nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	if (v3) {
		nfsm_build(tl, u_long *, NFSX_UNSIGNED);
		if (fmode & O_EXCL) {
			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
			/* Verifier: primary IP address (when configured)
			 * plus a monotonically increasing counter. */
			nfsm_build(tl, u_long *, NFSX_V3CREATEVERF);
#ifdef INET
			if (!TAILQ_EMPTY(&in_ifaddrhead))
				*tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
			else
#endif
				*tl++ = create_verf;
			*tl = ++create_verf;
		} else {
			*tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
			nfsm_build(tl, u_long *, NFSX_V3SRVSATTR);
			sp3 = (struct nfsv3_sattr *)tl;
			nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid);
		}
	} else {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid);
		sp->sa_gid = txdr_unsigned(vattr.va_gid);
		sp->sa_size = 0;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			if (newvp) {
				vput(newvp);
				newvp = (struct vnode *)0;
			}
			/* No handle returned; look the new file up by name. */
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	if (error) {
		/* Server lacks exclusive create; retry unchecked. */
		if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
			fmode &= ~O_EXCL;
			goto again;
		}
		if (newvp)
			vput(newvp);
	} else if (v3 && (fmode & O_EXCL))
		/* Exclusive create carries no sattr; set attributes now. */
		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
	if (!error) {
		if (cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, newvp, cnp);
		*ap->a_vpp = newvp;
	}
	zfree(namei_zone, cnp->cn_pnbuf);
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If v_usecount > 1
 *	  If a rename is not already in the works
 *	     call nfs_sillyrename() to set it up
 *	  else
 *	  do the remove rpc
 */
static int
nfs_remove(ap)
	struct vop_remove_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode * a_vp;
		struct componentname * a_cnp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct vnode *dvp = ap->a_dvp;
	register struct componentname *cnp = ap->a_cnp;
	register struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;

#ifndef DIAGNOSTIC
	/* NOTE(review): these sanity checks are compiled OUT when DIAGNOSTIC
	 * is defined; #ifdef DIAGNOSTIC looks like the intent -- confirm. */
	if ((cnp->cn_flags & HASBUF) == 0)
		panic("nfs_remove: no name");
	if (vp->v_usecount < 1)
		panic("nfs_remove: bad v_usecount");
#endif
	/* Remove outright when nobody else holds the vnode, or when an
	 * earlier sillyrename exists and the file still has other links. */
	if (vp->v_usecount == 1 || (np->n_sillyrename &&
		VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
		vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
		/* Do the rpc */
		if (error != EINTR)
			error = nfs_removerpc(dvp, cnp->cn_nameptr,
				cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
		/*
		 * Kludge City: If the first reply to the remove rpc is lost..
		 *   the reply to the retransmitted request will be ENOENT
		 *   since the file was in fact removed
		 *   Therefore, we cheat and return success.
		 */
		if (error == ENOENT)
			error = 0;
	} else if (!np->n_sillyrename)
		error = nfs_sillyrename(dvp, vp, cnp);
	zfree(namei_zone, cnp->cn_pnbuf);
	/* Attributes (link count) changed; force a refetch. */
	np->n_attrstamp = 0;
	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 */
int
nfs_removeit(sp)
	register struct sillyrename *sp;
{

	/* Only the credentials saved at sillyrename time are available. */
	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
		(struct proc *)0));
}

/*
 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 */
static int
nfs_removerpc(dvp, name, namelen, cred, proc)
	register struct vnode *dvp;
	const char *name;
	int namelen;
	struct ucred *cred;
	struct proc *proc;
{
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_REMOVE]++;
	nfsm_reqhead(dvp, NFSPROC_REMOVE,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_REMOVE, proc, cred);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	return (error);
}

/*
 * nfs file rename call
 */
static int
nfs_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	register struct vnode *fvp = ap->a_fvp;
	register struct vnode *tvp = ap->a_tvp;
	register struct vnode *fdvp = ap->a_fdvp;
	register struct vnode *tdvp = ap->a_tdvp;
	register struct componentname *tcnp = ap->a_tcnp;
	register struct componentname *fcnp = ap->a_fcnp;
	int error;

#ifndef DIAGNOSTIC
	/* NOTE(review): compiled OUT when DIAGNOSTIC is defined; #ifdef
	 * DIAGNOSTIC looks like the intent -- confirm. */
	if ((tcnp->cn_flags & HASBUF) == 0 ||
		(fcnp->cn_flags & HASBUF) == 0)
		panic("nfs_rename: no name");
#endif
	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
		(tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	/*
	 * If the tvp exists and is in use, sillyrename it before doing the
	 * rename of the new file over it.
	 * XXX Can't sillyrename a directory.
	 */
	if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
		tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
		vput(tvp);
		tvp = NULL;
	}

	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
		tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
		tcnp->cn_proc);

	/* Moving a directory invalidates names cached under both parents. */
	if (fvp->v_type == VDIR) {
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tdvp);
		cache_purge(fdvp);
	}
out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs file rename rpc called from nfs_remove() above
 */
static int
nfs_renameit(sdvp, scnp, sp)
	struct vnode *sdvp;
	struct componentname *scnp;
	register struct sillyrename *sp;
{
	/* Rename within the same directory to the sillyrename's name. */
	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
		sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc));
}

/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 */
static int
nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc)
	register struct vnode *fdvp;
	const char *fnameptr;
	int fnamelen;
	register struct vnode *tdvp;
	const char *tnameptr;
	int tnamelen;
	struct ucred *cred;
	struct proc *proc;
{
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(fdvp);

	nfsstats.rpccnt[NFSPROC_RENAME]++;
	nfsm_reqhead(fdvp, NFSPROC_RENAME,
		(NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
		nfsm_rndup(tnamelen));
	nfsm_fhtom(fdvp, v3);
	nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
	nfsm_fhtom(tdvp, v3);
	nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
	nfsm_request(fdvp, NFSPROC_RENAME, proc, cred);
	if (v3) {
		nfsm_wcc_data(fdvp, fwccflag);
		nfsm_wcc_data(tdvp, twccflag);
	}
	nfsm_reqdone;
	/* Both directories were modified on the server. */
	VTONFS(fdvp)->n_flag |= NMODIFIED;
	VTONFS(tdvp)->n_flag |= NMODIFIED;
	if (!fwccflag)
		VTONFS(fdvp)->n_attrstamp = 0;
	if (!twccflag)
		VTONFS(tdvp)->n_attrstamp = 0;
	return (error);
}

/*
 * nfs hard link create call
 */
static int
nfs_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct vnode *tdvp = ap->a_tdvp;
	register struct componentname *cnp = ap->a_cnp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(vp);

	/* Cross-mount links are not possible. */
	if (vp->v_mount != tdvp->v_mount) {
		VOP_ABORTOP(tdvp, cnp);
		return
(EXDEV);
	}

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);

	nfsstats.rpccnt[NFSPROC_LINK]++;
	nfsm_reqhead(vp, NFSPROC_LINK,
		NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
	nfsm_fhtom(vp, v3);
	nfsm_fhtom(tdvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
	if (v3) {
		nfsm_postop_attr(vp, attrflag);
		nfsm_wcc_data(tdvp, wccflag);
	}
	nfsm_reqdone;
	zfree(namei_zone, cnp->cn_pnbuf);
	VTONFS(tdvp)->n_flag |= NMODIFIED;
	if (!attrflag)
		VTONFS(vp)->n_attrstamp = 0;
	if (!wccflag)
		VTONFS(tdvp)->n_attrstamp = 0;
	/*
	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
	 */
	if (error == EEXIST)
		error = 0;
	return (error);
}

/*
 * nfs symbolic link create call
 */
static int
nfs_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	register struct vnode *dvp = ap->a_dvp;
	register struct vattr *vap = ap->a_vap;
	register struct componentname *cnp = ap->a_cnp;
	register struct nfsv2_sattr *sp;
	register struct nfsv3_sattr *sp3;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct vnode *newvp = (struct vnode *)0;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_SYMLINK]++;
	slen = strlen(ap->a_target);
	nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
	    nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	/* V3 sends attributes before the target string, V2 after it. */
	if (v3) {
		nfsm_build(sp3, struct nfsv3_sattr *, NFSX_V3SRVSATTR);
		nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid,
			cnp->cn_cred->cr_gid);
	}
	nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
	if (!v3) {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
		sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid);
		sp->sa_gid = txdr_unsigned(cnp->cn_cred->cr_gid);
		sp->sa_size = -1;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred);
	if (v3) {
		if (!error)
			nfsm_mtofh(dvp, newvp, v3, gotvp);
		nfsm_wcc_data(dvp, wccflag);
	}
	nfsm_reqdone;
	/* The new vnode is never handed back to the caller; release it. */
	if (newvp)
		vput(newvp);
	zfree(namei_zone, cnp->cn_pnbuf);
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	/*
	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
	 */
	if (error == EEXIST)
		error = 0;
	return (error);
}

/*
 * nfs make dir call
 */
static int
nfs_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	register struct vnode *dvp = ap->a_dvp;
	register struct vattr *vap = ap->a_vap;
	register struct componentname *cnp = ap->a_cnp;
	register struct nfsv2_sattr *sp;
	register struct nfsv3_sattr *sp3;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	register int len;
	struct nfsnode *np = (struct nfsnode *)0;
	struct vnode *newvp = (struct vnode *)0;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	int gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct vattr vattr;
	int v3 = NFS_ISV3(dvp);

	/* The parent's attributes supply the new directory's group id. */
	if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) {
		VOP_ABORTOP(dvp, cnp);
		return (error);
	}
	len = cnp->cn_namelen;
	nfsstats.rpccnt[NFSPROC_MKDIR]++;
	nfsm_reqhead(dvp, NFSPROC_MKDIR,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
	if (v3) {
		nfsm_build(sp3, struct nfsv3_sattr *, NFSX_V3SRVSATTR);
		nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid);
	} else {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
		sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid);
		sp->sa_gid = txdr_unsigned(vattr.va_gid);
		sp->sa_size = -1;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
	if (!error)
		nfsm_mtofh(dvp, newvp, v3, gotvp);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	/*
	 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
	 * if we can succeed in looking up the directory.
	 */
	if (error == EEXIST || (!error && !gotvp)) {
		if (newvp) {
			vrele(newvp);
			newvp = (struct vnode *)0;
		}
		error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
			cnp->cn_proc, &np);
		if (!error) {
			newvp = NFSTOV(np);
			/* Name exists but is not a directory: real EEXIST. */
			if (newvp->v_type != VDIR)
				error = EEXIST;
		}
	}
	if (error) {
		if (newvp)
			vrele(newvp);
	} else
		*ap->a_vpp = newvp;
	zfree(namei_zone, cnp->cn_pnbuf);
	return (error);
}

/*
 * nfs remove directory call
 */
static int
nfs_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct vnode *dvp = ap->a_dvp;
	register struct componentname *cnp = ap->a_cnp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_RMDIR]++;
	nfsm_reqhead(dvp, NFSPROC_RMDIR,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	zfree(namei_zone, cnp->cn_pnbuf);
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		VTONFS(dvp)->n_attrstamp = 0;
	/* Invalidate cached names under both the parent and the victim. */
	cache_purge(dvp);
	cache_purge(vp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs readdir call
 */
static int
nfs_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);
	register struct uio *uio = ap->a_uio;
	int tresid, error;
	struct vattr vattr;

	if (vp->v_type != VDIR)
		return (EPERM);
	/*
	 * First, check for hit on the EOF offset cache
	 */
	if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
	    (np->n_flag & NMODIFIED) == 0) {
		if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
			if (NQNFS_CKCACHABLE(vp, ND_READ)) {
				nfsstats.direofcache_hits++;
				return (0);
			}
		} else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
			np->n_mtime == vattr.va_mtime.tv_sec) {
			/* Directory unchanged since EOF was cached. */
			nfsstats.direofcache_hits++;
			return (0);
		}
	}

	/*
	 * Call nfs_bioread() to do the real work.
	 */
	tresid = uio->uio_resid;
	error = nfs_bioread(vp, uio, 0, ap->a_cred, 0);

	/* Nothing transferred means we went to the server and found EOF. */
	if (!error && uio->uio_resid == tresid)
		nfsstats.direofcache_misses++;
	return (error);
}

/*
 * Readdir rpc call.
 * Called from below the buffer cache by nfs_doio().
 */
int
nfs_readdirrpc(vp, uiop, cred)
	struct vnode *vp;
	register struct uio *uiop;
	struct ucred *cred;

{
	register int len, left;
	register struct dirent *dp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	register nfsuint64 *cookiep;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	nfsuint64 cookie;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *dnp = VTONFS(vp);
	u_quad_t fileno;
	int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
	int attrflag;
	int v3 = NFS_ISV3(vp);

#ifndef nolint
	/* Initialization to quiet lint; dp is always set before use. */
	dp = (struct dirent *)0;
#endif
#ifndef DIAGNOSTIC
	/* NOTE(review): compiled OUT when DIAGNOSTIC is defined; #ifdef
	 * DIAGNOSTIC looks like the intent -- confirm. */
	if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (NFS_DIRBLKSIZ - 1)) ||
		(uiop->uio_resid & (NFS_DIRBLKSIZ - 1)))
		panic("nfs readdirrpc bad uio");
#endif

	/*
	 * If there is no cookie, assume directory was stale.
	 */
	cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep)
		cookie = *cookiep;
	else
		return (NFSERR_BAD_COOKIE);
	/*
	 * Loop around doing readdir rpc's of size nm_readdirsize
	 * truncated to a multiple of DIRBLKSIZ.
	 * The stopping criteria is EOF or buffer full.
	 */
	while (more_dirs && bigenough) {
		nfsstats.rpccnt[NFSPROC_READDIR]++;
		nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
			NFSX_READDIR(v3));
		nfsm_fhtom(vp, v3);
		if (v3) {
			/* V3 sends the cookie plus the server's verifier. */
			nfsm_build(tl, u_long *, 5 * NFSX_UNSIGNED);
			*tl++ = cookie.nfsuquad[0];
			*tl++ = cookie.nfsuquad[1];
			*tl++ = dnp->n_cookieverf.nfsuquad[0];
			*tl++ = dnp->n_cookieverf.nfsuquad[1];
		} else {
			nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED);
			*tl++ = cookie.nfsuquad[0];
		}
		*tl = txdr_unsigned(nmp->nm_readdirsize);
		nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
		if (v3) {
			nfsm_postop_attr(vp, attrflag);
			if (!error) {
				/* Save the cookie verifier for next time. */
				nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
				dnp->n_cookieverf.nfsuquad[0] = *tl++;
				dnp->n_cookieverf.nfsuquad[1] = *tl;
			} else {
				m_freem(mrep);
				goto nfsmout;
			}
		}
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		more_dirs = fxdr_unsigned(int, *tl);

		/* loop thru the dir entries, doctoring them to 4bsd form */
		while (more_dirs && bigenough) {
			if (v3) {
				nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
				fxdr_hyper(tl, &fileno);
				len = fxdr_unsigned(int, *(tl + 2));
			} else {
				nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
				fileno = fxdr_unsigned(u_quad_t, *tl++);
				len = fxdr_unsigned(int, *tl);
			}
			if (len <= 0 || len > NFS_MAXNAMLEN) {
				error = EBADRPC;
				m_freem(mrep);
				goto nfsmout;
			}
			tlen = nfsm_rndup(len);
			if (tlen == len)
				tlen += 4;	/* To ensure null termination */
			left = DIRBLKSIZ - blksiz;
			if ((tlen + DIRHDSIZ) > left) {
				/* Entry will not fit in this DIRBLKSIZ block;
				 * pad the previous record to the boundary. */
				dp->d_reclen += left;
				uiop->uio_iov->iov_base += left;
				uiop->uio_iov->iov_len -= left;
				uiop->uio_offset += left;
				uiop->uio_resid -= left;
				blksiz = 0;
			}
			if ((tlen + DIRHDSIZ) > uiop->uio_resid)
				bigenough = 0;
			if (bigenough) {
				dp = (struct dirent *)uiop->uio_iov->iov_base;
				dp->d_fileno = (int)fileno;
				dp->d_namlen = len;
				dp->d_reclen = tlen + DIRHDSIZ;
				dp->d_type = DT_UNKNOWN;
				blksiz += dp->d_reclen;
				if (blksiz == DIRBLKSIZ)
					blksiz = 0;
				uiop->uio_offset += DIRHDSIZ;
				uiop->uio_resid -= DIRHDSIZ;
				uiop->uio_iov->iov_base += DIRHDSIZ;
				uiop->uio_iov->iov_len -= DIRHDSIZ;
				nfsm_mtouio(uiop, len);
				cp = uiop->uio_iov->iov_base;
				tlen -= len;
				*cp = '\0';	/* null terminate */
				uiop->uio_iov->iov_base += tlen;
				uiop->uio_iov->iov_len -= tlen;
				uiop->uio_offset += tlen;
				uiop->uio_resid -= tlen;
			} else
				nfsm_adv(nfsm_rndup(len));
			/* Pick up this entry's cookie and the "more" flag. */
			if (v3) {
				nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
			} else {
				nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
			}
			if (bigenough) {
				cookie.nfsuquad[0] = *tl++;
				if (v3)
					cookie.nfsuquad[1] = *tl++;
			} else if (v3)
				tl += 2;
			else
				tl++;
			more_dirs = fxdr_unsigned(int, *tl);
		}
		/*
		 * If at end of rpc data, get the eof boolean
		 */
		if (!more_dirs) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			more_dirs = (fxdr_unsigned(int, *tl) == 0);
		}
		m_freem(mrep);
	}
	/*
	 * Fill last record, iff any, out to a multiple of DIRBLKSIZ
	 * by increasing d_reclen for the last record.
	 */
	if (blksiz > 0) {
		left = DIRBLKSIZ - blksiz;
		dp->d_reclen += left;
		uiop->uio_iov->iov_base += left;
		uiop->uio_iov->iov_len -= left;
		uiop->uio_offset += left;
		uiop->uio_resid -= left;
	}

	/*
	 * We are now either at the end of the directory or have filled the
	 * block.
	 */
	if (bigenough)
		dnp->n_direofoffset = uiop->uio_offset;
	else {
		if (uiop->uio_resid > 0)
			printf("EEK! readdirrpc resid > 0\n");
		/* Stash the cookie for the next block's RPC. */
		cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
		*cookiep = cookie;
	}
nfsmout:
	return (error);
}

/*
 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
 */
int
nfs_readdirplusrpc(vp, uiop, cred)
	struct vnode *vp;
	register struct uio *uiop;
	struct ucred *cred;
{
	register int len, left;
	register struct dirent *dp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	register struct vnode *newvp;
	register nfsuint64 *cookiep;
	caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
	struct nameidata nami, *ndp = &nami;
	struct componentname *cnp = &ndp->ni_cnd;
	nfsuint64 cookie;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *dnp = VTONFS(vp), *np;
	nfsfh_t *fhp;
	u_quad_t fileno;
	int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
	int attrflag, fhsize;

#ifndef nolint
	/* Initialization to quiet lint; dp is always set before use. */
	dp = (struct dirent *)0;
#endif
#ifndef DIAGNOSTIC
	/* NOTE(review): compiled OUT when DIAGNOSTIC is defined; #ifdef
	 * DIAGNOSTIC looks like the intent -- confirm. */
	if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
		(uiop->uio_resid & (DIRBLKSIZ - 1)))
		panic("nfs readdirplusrpc bad uio");
#endif
	ndp->ni_dvp = vp;
	newvp = NULLVP;

	/*
	 * If there is no cookie, assume directory was stale.
	 */
	cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep)
		cookie = *cookiep;
	else
		return (NFSERR_BAD_COOKIE);
	/*
	 * Loop around doing readdir rpc's of size nm_readdirsize
	 * truncated to a multiple of DIRBLKSIZ.
	 * The stopping criteria is EOF or buffer full.
2205 */ 2206 while (more_dirs && bigenough) { 2207 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2208 nfsm_reqhead(vp, NFSPROC_READDIRPLUS, 2209 NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2210 nfsm_fhtom(vp, 1); 2211 nfsm_build(tl, u_long *, 6 * NFSX_UNSIGNED); 2212 *tl++ = cookie.nfsuquad[0]; 2213 *tl++ = cookie.nfsuquad[1]; 2214 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2215 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2216 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2217 *tl = txdr_unsigned(nmp->nm_rsize); 2218 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred); 2219 nfsm_postop_attr(vp, attrflag); 2220 if (error) { 2221 m_freem(mrep); 2222 goto nfsmout; 2223 } 2224 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); 2225 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2226 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2227 more_dirs = fxdr_unsigned(int, *tl); 2228 2229 /* loop thru the dir entries, doctoring them to 4bsd form */ 2230 while (more_dirs && bigenough) { 2231 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); 2232 fxdr_hyper(tl, &fileno); 2233 len = fxdr_unsigned(int, *(tl + 2)); 2234 if (len <= 0 || len > NFS_MAXNAMLEN) { 2235 error = EBADRPC; 2236 m_freem(mrep); 2237 goto nfsmout; 2238 } 2239 tlen = nfsm_rndup(len); 2240 if (tlen == len) 2241 tlen += 4; /* To ensure null termination*/ 2242 left = DIRBLKSIZ - blksiz; 2243 if ((tlen + DIRHDSIZ) > left) { 2244 dp->d_reclen += left; 2245 uiop->uio_iov->iov_base += left; 2246 uiop->uio_iov->iov_len -= left; 2247 uiop->uio_offset += left; 2248 uiop->uio_resid -= left; 2249 blksiz = 0; 2250 } 2251 if ((tlen + DIRHDSIZ) > uiop->uio_resid) 2252 bigenough = 0; 2253 if (bigenough) { 2254 dp = (struct dirent *)uiop->uio_iov->iov_base; 2255 dp->d_fileno = (int)fileno; 2256 dp->d_namlen = len; 2257 dp->d_reclen = tlen + DIRHDSIZ; 2258 dp->d_type = DT_UNKNOWN; 2259 blksiz += dp->d_reclen; 2260 if (blksiz == DIRBLKSIZ) 2261 blksiz = 0; 2262 uiop->uio_offset += DIRHDSIZ; 2263 uiop->uio_resid -= DIRHDSIZ; 2264 uiop->uio_iov->iov_base += DIRHDSIZ; 2265 
uiop->uio_iov->iov_len -= DIRHDSIZ; 2266 cnp->cn_nameptr = uiop->uio_iov->iov_base; 2267 cnp->cn_namelen = len; 2268 nfsm_mtouio(uiop, len); 2269 cp = uiop->uio_iov->iov_base; 2270 tlen -= len; 2271 *cp = '\0'; 2272 uiop->uio_iov->iov_base += tlen; 2273 uiop->uio_iov->iov_len -= tlen; 2274 uiop->uio_offset += tlen; 2275 uiop->uio_resid -= tlen; 2276 } else 2277 nfsm_adv(nfsm_rndup(len)); 2278 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); 2279 if (bigenough) { 2280 cookie.nfsuquad[0] = *tl++; 2281 cookie.nfsuquad[1] = *tl++; 2282 } else 2283 tl += 2; 2284 2285 /* 2286 * Since the attributes are before the file handle 2287 * (sigh), we must skip over the attributes and then 2288 * come back and get them. 2289 */ 2290 attrflag = fxdr_unsigned(int, *tl); 2291 if (attrflag) { 2292 dpossav1 = dpos; 2293 mdsav1 = md; 2294 nfsm_adv(NFSX_V3FATTR); 2295 nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); 2296 doit = fxdr_unsigned(int, *tl); 2297 if (doit) { 2298 nfsm_getfh(fhp, fhsize, 1); 2299 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2300 VREF(vp); 2301 newvp = vp; 2302 np = dnp; 2303 } else { 2304 error = nfs_nget(vp->v_mount, fhp, 2305 fhsize, &np); 2306 if (error) 2307 doit = 0; 2308 else 2309 newvp = NFSTOV(np); 2310 } 2311 } 2312 if (doit) { 2313 dpossav2 = dpos; 2314 dpos = dpossav1; 2315 mdsav2 = md; 2316 md = mdsav1; 2317 nfsm_loadattr(newvp, (struct vattr *)0); 2318 dpos = dpossav2; 2319 md = mdsav2; 2320 dp->d_type = 2321 IFTODT(VTTOIF(np->n_vattr.va_type)); 2322 ndp->ni_vp = newvp; 2323 cnp->cn_hash = 0; 2324 for (cp = cnp->cn_nameptr, i = 1; i <= len; 2325 i++, cp++) 2326 cnp->cn_hash += (unsigned char)*cp * i; 2327 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp); 2328 } 2329 } else { 2330 /* Just skip over the file handle */ 2331 nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); 2332 i = fxdr_unsigned(int, *tl); 2333 nfsm_adv(nfsm_rndup(i)); 2334 } 2335 if (newvp != NULLVP) { 2336 vrele(newvp); 2337 newvp = NULLVP; 2338 } 2339 nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); 2340 more_dirs = 
fxdr_unsigned(int, *tl); 2341 } 2342 /* 2343 * If at end of rpc data, get the eof boolean 2344 */ 2345 if (!more_dirs) { 2346 nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); 2347 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2348 } 2349 m_freem(mrep); 2350 } 2351 /* 2352 * Fill last record, iff any, out to a multiple of NFS_DIRBLKSIZ 2353 * by increasing d_reclen for the last record. 2354 */ 2355 if (blksiz > 0) { 2356 left = DIRBLKSIZ - blksiz; 2357 dp->d_reclen += left; 2358 uiop->uio_iov->iov_base += left; 2359 uiop->uio_iov->iov_len -= left; 2360 uiop->uio_offset += left; 2361 uiop->uio_resid -= left; 2362 } 2363 2364 /* 2365 * We are now either at the end of the directory or have filled the 2366 * block. 2367 */ 2368 if (bigenough) 2369 dnp->n_direofoffset = uiop->uio_offset; 2370 else { 2371 if (uiop->uio_resid > 0) 2372 printf("EEK! readdirplusrpc resid > 0\n"); 2373 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1); 2374 *cookiep = cookie; 2375 } 2376nfsmout: 2377 if (newvp != NULLVP) { 2378 if (newvp == vp) 2379 vrele(newvp); 2380 else 2381 vput(newvp); 2382 newvp = NULLVP; 2383 } 2384 return (error); 2385} 2386 2387/* 2388 * Silly rename. To make the NFS filesystem that is stateless look a little 2389 * more like the "ufs" a remove of an active vnode is translated to a rename 2390 * to a funny looking filename that is removed by nfs_inactive on the 2391 * nfsnode. There is the potential for another process on a different client 2392 * to create the same funny name between the nfs_lookitup() fails and the 2393 * nfs_rename() completes, but... 
2394 */ 2395static int 2396nfs_sillyrename(dvp, vp, cnp) 2397 struct vnode *dvp, *vp; 2398 struct componentname *cnp; 2399{ 2400 register struct sillyrename *sp; 2401 struct nfsnode *np; 2402 int error; 2403 short pid; 2404 2405 cache_purge(dvp); 2406 np = VTONFS(vp); 2407#ifndef DIAGNOSTIC 2408 if (vp->v_type == VDIR) 2409 panic("nfs: sillyrename dir"); 2410#endif 2411 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename), 2412 M_NFSREQ, M_WAITOK); 2413 sp->s_cred = crdup(cnp->cn_cred); 2414 sp->s_dvp = dvp; 2415 VREF(dvp); 2416 2417 /* Fudge together a funny name */ 2418 pid = cnp->cn_proc->p_pid; 2419 sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid); 2420 2421 /* Try lookitups until we get one that isn't there */ 2422 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2423 cnp->cn_proc, (struct nfsnode **)0) == 0) { 2424 sp->s_name[4]++; 2425 if (sp->s_name[4] > 'z') { 2426 error = EINVAL; 2427 goto bad; 2428 } 2429 } 2430 error = nfs_renameit(dvp, cnp, sp); 2431 if (error) 2432 goto bad; 2433 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2434 cnp->cn_proc, &np); 2435 np->n_sillyrename = sp; 2436 return (0); 2437bad: 2438 vrele(sp->s_dvp); 2439 crfree(sp->s_cred); 2440 free((caddr_t)sp, M_NFSREQ); 2441 return (error); 2442} 2443 2444/* 2445 * Look up a file name and optionally either update the file handle or 2446 * allocate an nfsnode, depending on the value of npp. 
2447 * npp == NULL --> just do the lookup 2448 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2449 * handled too 2450 * *npp != NULL --> update the file handle in the vnode 2451 */ 2452static int 2453nfs_lookitup(dvp, name, len, cred, procp, npp) 2454 register struct vnode *dvp; 2455 const char *name; 2456 int len; 2457 struct ucred *cred; 2458 struct proc *procp; 2459 struct nfsnode **npp; 2460{ 2461 register u_long *tl; 2462 register caddr_t cp; 2463 register long t1, t2; 2464 struct vnode *newvp = (struct vnode *)0; 2465 struct nfsnode *np, *dnp = VTONFS(dvp); 2466 caddr_t bpos, dpos, cp2; 2467 int error = 0, fhlen, attrflag; 2468 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 2469 nfsfh_t *nfhp; 2470 int v3 = NFS_ISV3(dvp); 2471 2472 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2473 nfsm_reqhead(dvp, NFSPROC_LOOKUP, 2474 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 2475 nfsm_fhtom(dvp, v3); 2476 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2477 nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred); 2478 if (npp && !error) { 2479 nfsm_getfh(nfhp, fhlen, v3); 2480 if (*npp) { 2481 np = *npp; 2482 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) { 2483 free((caddr_t)np->n_fhp, M_NFSBIGFH); 2484 np->n_fhp = &np->n_fh; 2485 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH) 2486 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK); 2487 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen); 2488 np->n_fhsize = fhlen; 2489 newvp = NFSTOV(np); 2490 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2491 VREF(dvp); 2492 newvp = dvp; 2493 } else { 2494 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2495 if (error) { 2496 m_freem(mrep); 2497 return (error); 2498 } 2499 newvp = NFSTOV(np); 2500 } 2501 if (v3) { 2502 nfsm_postop_attr(newvp, attrflag); 2503 if (!attrflag && *npp == NULL) { 2504 m_freem(mrep); 2505 if (newvp == dvp) 2506 vrele(newvp); 2507 else 2508 vput(newvp); 2509 return (ENOENT); 2510 } 2511 } else 2512 nfsm_loadattr(newvp, (struct vattr *)0); 
2513 } 2514 nfsm_reqdone; 2515 if (npp && *npp == NULL) { 2516 if (error) { 2517 if (newvp) 2518 if (newvp == dvp) 2519 vrele(newvp); 2520 else 2521 vput(newvp); 2522 } else 2523 *npp = np; 2524 } 2525 return (error); 2526} 2527 2528/* 2529 * Nfs Version 3 commit rpc 2530 */ 2531static int 2532nfs_commit(vp, offset, cnt, cred, procp) 2533 register struct vnode *vp; 2534 u_quad_t offset; 2535 int cnt; 2536 struct ucred *cred; 2537 struct proc *procp; 2538{ 2539 register caddr_t cp; 2540 register u_long *tl; 2541 register int t1, t2; 2542 register struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2543 caddr_t bpos, dpos, cp2; 2544 int error = 0, wccflag = NFSV3_WCCRATTR; 2545 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 2546 2547 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) 2548 return (0); 2549 nfsstats.rpccnt[NFSPROC_COMMIT]++; 2550 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1)); 2551 nfsm_fhtom(vp, 1); 2552 nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); 2553 txdr_hyper(&offset, tl); 2554 tl += 2; 2555 *tl = txdr_unsigned(cnt); 2556 nfsm_request(vp, NFSPROC_COMMIT, procp, cred); 2557 nfsm_wcc_data(vp, wccflag); 2558 if (!error) { 2559 nfsm_dissect(tl, u_long *, NFSX_V3WRITEVERF); 2560 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl, 2561 NFSX_V3WRITEVERF)) { 2562 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 2563 NFSX_V3WRITEVERF); 2564 error = NFSERR_STALEWRITEVERF; 2565 } 2566 } 2567 nfsm_reqdone; 2568 return (error); 2569} 2570 2571/* 2572 * Kludge City.. 2573 * - make nfs_bmap() essentially a no-op that does no translation 2574 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc 2575 * (Maybe I could use the process's page mapping, but I was concerned that 2576 * Kernel Write might not be enabled and also figured copyout() would do 2577 * a lot more work than bcopy() and also it currently happens in the 2578 * context of the swapper process (2). 
 */
/*
 * nfs bmap vnode op: identity mapping.  The logical block number is
 * converted to a device block number using the mount's I/O size; no
 * real translation is done (see "Kludge City" note above).
 */
static int
nfs_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
	/* no read-ahead/read-behind clustering over NFS */
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

/*
 * Strategy routine.
 * For async requests when nfsiod(s) are running, queue the request by
 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the
 * request.
 */
static int
nfs_strategy(ap)
	struct vop_strategy_args *ap;
{
	register struct buf *bp = ap->a_bp;
	struct ucred *cr;
	struct proc *p;
	int error = 0;

	if (bp->b_flags & B_PHYS)
		panic("nfs physio");
	/* async I/O has no process context to charge/sleep on */
	if (bp->b_flags & B_ASYNC)
		p = (struct proc *)0;
	else
		p = curproc;	/* XXX */
	if (bp->b_flags & B_READ)
		cr = bp->b_rcred;
	else
		cr = bp->b_wcred;
	/*
	 * If the op is asynchronous and an i/o daemon is waiting
	 * queue the request, wake it up and wait for completion
	 * otherwise just do it ourselves.
	 */
	if ((bp->b_flags & B_ASYNC) == 0 ||
		nfs_asyncio(bp, NOCRED))
		error = nfs_doio(bp, cr, p);
	return (error);
}

/*
 * Mmap a file
 *
 * NB Currently unsupported.
 */
/* ARGSUSED */
static int
nfs_mmap(ap)
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		int  a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (EINVAL);
}

/*
 * fsync vnode op. Just call nfs_flush() with commit == 1.
 */
/* ARGSUSED */
static int
nfs_fsync(ap)
	struct vop_fsync_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_vp;
		struct ucred * a_cred;
		int  a_waitfor;
		struct proc * a_p;
	} */ *ap;
{

	return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
}

/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 *
 * commit != 0 enables the V3 two-pass scheme: pass one gathers the
 * B_DELWRI|B_NEEDCOMMIT buffers and issues commit rpc(s) for them,
 * pass two writes out whatever remains dirty.  waitfor == MNT_WAIT
 * makes the routine sleep until all output has drained.
 */
static int
nfs_flush(vp, cred, waitfor, p, commit)
	register struct vnode *vp;
	struct ucred *cred;
	int waitfor;
	struct proc *p;
	int commit;
{
	register struct nfsnode *np = VTONFS(vp);
	register struct buf *bp;
	register int i;
	struct buf *nbp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
	int passone = 1;
	u_quad_t off, endoff, toff;
	struct ucred* wcred = NULL;
	struct buf **bvec = NULL;
#ifndef NFS_COMMITBVECSIZ
#define NFS_COMMITBVECSIZ	20
#endif
	struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
	int bvecsize = 0, bveccount;

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	if (!commit)
		passone = 0;
	/*
	 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
	 * server, but has not been committed to stable storage on the server
	 * yet. On the first pass, the byte range is worked out and the commit
	 * rpc is done. On the second pass, nfs_writebp() is called to do the
	 * job.
	 */
again:
	off = (u_quad_t)-1;
	endoff = 0;
	bvecpos = 0;
	if (NFS_ISV3(vp) && commit) {
		s = splbio();
		/*
		 * Count up how many buffers waiting for a commit.
		 */
		bveccount = 0;
		for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
				== (B_DELWRI | B_NEEDCOMMIT))
				bveccount++;
		}
		/*
		 * Allocate space to remember the list of bufs to commit. It is
		 * important to use M_NOWAIT here to avoid a race with nfs_write.
		 * If we can't get memory (for whatever reason), we will end up
		 * committing the buffers one-by-one in the loop below.
		 */
		if (bveccount > NFS_COMMITBVECSIZ) {
			if (bvec != NULL && bvec != bvec_on_stack)
				free(bvec, M_TEMP);
			bvec = (struct buf **)
				malloc(bveccount * sizeof(struct buf *),
				M_TEMP, M_NOWAIT);
			if (bvec == NULL) {
				bvec = bvec_on_stack;
				bvecsize = NFS_COMMITBVECSIZ;
			} else
				bvecsize = bveccount;
		} else {
			bvec = bvec_on_stack;
			bvecsize = NFS_COMMITBVECSIZ;
		}
		for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (bvecpos >= bvecsize)
				break;
			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
				!= (B_DELWRI | B_NEEDCOMMIT))
				continue;
			bremfree(bp);
			/*
			 * Work out if all buffers are using the same cred
			 * so we can deal with them all with one commit.
			 */
			if (wcred == NULL)
				wcred = bp->b_wcred;
			else if (wcred != bp->b_wcred)
				wcred = NOCRED;
			bp->b_flags |= (B_BUSY | B_WRITEINPROG);
			vfs_busy_pages(bp, 1);
			/*
			 * A list of these buffers is kept so that the
			 * second loop knows which buffers have actually
			 * been committed. This is necessary, since there
			 * may be a race between the commit rpc and new
			 * uncommitted writes on the file.
			 */
			bvec[bvecpos++] = bp;
			/* extend [off, endoff) to span this buffer's dirty range */
			toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
				bp->b_dirtyoff;
			if (toff < off)
				off = toff;
			toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
			if (toff > endoff)
				endoff = toff;
		}
		splx(s);
	}
	if (bvecpos > 0) {
		/*
		 * Commit data on the server, as required.
		 * If all bufs are using the same wcred, then use that with
		 * one call for all of them, otherwise commit each one
		 * separately.
		 */
		if (wcred != NOCRED)
			retv = nfs_commit(vp, off, (int)(endoff - off),
				wcred, p);
		else {
			retv = 0;
			for (i = 0; i < bvecpos; i++) {
				off_t off, size;
				bp = bvec[i];
				off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
					bp->b_dirtyoff;
				size = (u_quad_t)(bp->b_dirtyend
					- bp->b_dirtyoff);
				retv = nfs_commit(vp, off, (int)size,
					bp->b_wcred, p);
				if (retv) break;
			}
		}

		if (retv == NFSERR_STALEWRITEVERF)
			nfs_clearcommit(vp->v_mount);
		/*
		 * Now, either mark the blocks I/O done or mark the
		 * blocks dirty, depending on whether the commit
		 * succeeded.
		 */
		for (i = 0; i < bvecpos; i++) {
			bp = bvec[i];
			bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG);
			if (retv) {
				/* commit failed: leave the buffer dirty */
				vfs_unbusy_pages(bp);
				brelse(bp);
			} else {
				/* committed: complete the buffer as clean */
				vp->v_numoutput++;
				bp->b_flags |= B_ASYNC;
				if (bp->b_flags & B_DELWRI) {
					--numdirtybuffers;
					if (needsbuffer) {
						vfs_bio_need_satisfy();
					}
				}
				s = splbio();	/* XXX check this positioning */
				bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				reassignbuf(bp, vp);
				splx(s);
				biodone(bp);
			}
		}
	}

	/*
	 * Start/do any write(s) that are required.
	 */
loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if (bp->b_flags & B_BUSY) {
			if (waitfor != MNT_WAIT || passone)
				continue;
			/* wait for the busy buffer, then rescan the list */
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
				"nfsfsync", slptimeo);
			splx(s);
			if (error) {
				if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
					error = EINTR;
					goto done;
				}
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			goto loop;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("nfs_fsync: not dirty");
		/* pass one only commits; leave B_NEEDCOMMIT bufs alone */
		if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT))
			continue;
		bremfree(bp);
		if (passone || !commit)
			bp->b_flags |= (B_BUSY|B_ASYNC);
		else
			bp->b_flags |= (B_BUSY|B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT);
		splx(s);
		VOP_BWRITE(bp);
		goto loop;
	}
	splx(s);
	if (passone) {
		passone = 0;
		goto again;
	}
	if (waitfor == MNT_WAIT) {
		/* drain all outstanding writes before returning */
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep((caddr_t)&vp->v_numoutput,
				slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
			if (error) {
			    if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
				error = EINTR;
				goto done;
			    }
			    if (slpflag == PCATCH) {
				slpflag = 0;
				slptimeo = 2 * hz;
			    }
			}
		}
		if (vp->v_dirtyblkhd.lh_first && commit) {
			goto loop;
		}
	}
	/* report any asynchronous write error recorded on the nfsnode */
	if (np->n_flag & NWRITEERR) {
		error = np->n_error;
		np->n_flag &= ~NWRITEERR;
	}
done:
	if (bvec != NULL && bvec != bvec_on_stack)
		free(bvec, M_TEMP);
	return (error);
}

/*
 * NFS advisory byte-level locks.
 * Currently unsupported.
 */
static int
nfs_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
{
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * The following kludge is to allow diskless support to work
	 * until a real NFS lockd is implemented. Basically, just pretend
	 * that this is a local lock.
	 */
	return (lf_advlock(ap, &(np->n_lockf), np->n_size));
}

/*
 * Print out the contents of an nfsnode.
 */
static int
nfs_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);

	printf("tag VT_NFS, fileid %ld fsid 0x%lx",
		np->n_vattr.va_fileid, np->n_vattr.va_fsid);
	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);
	printf("\n");
	return (0);
}

/*
 * Just call nfs_writebp() with the force argument set to 1.
 */
static int
nfs_bwrite(ap)
	struct vop_bwrite_args /* {
		struct vnode *a_bp;
	} */ *ap;
{

	return (nfs_writebp(ap->a_bp, 1));
}

/*
 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
 * the force flag is one and it also handles the B_NEEDCOMMIT flag.
 */
/*
 * Write a buffer out to the server.  If the buffer only needs a commit
 * (B_NEEDCOMMIT set, no write already in progress), try an nfs_commit()
 * rpc first and only fall back to a full VOP_STRATEGY() write if the
 * commit fails.  For synchronous callers (B_ASYNC clear) the routine
 * waits for completion and returns the I/O status.
 */
int
nfs_writebp(bp, force)
	register struct buf *bp;
	int force;
{
	int s;
	register int oldflags = bp->b_flags, retv = 1;
	off_t off;

	if(!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	/* NOTE(review): B_INVAL is already set here; only B_NOCACHE is new */
	if (bp->b_flags & B_INVAL)
		bp->b_flags |= B_INVAL | B_NOCACHE;

	if (bp->b_flags & B_DELWRI) {
		--numdirtybuffers;
		if (needsbuffer)
			vfs_bio_need_satisfy();
	}
	s = splbio(); /* XXX check if needed */
	bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	curproc->p_stats->p_ru.ru_oublock++;
	splx(s);

	/*
	 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not
	 * an actual write will have to be scheduled via. VOP_STRATEGY().
	 * If B_WRITEINPROG is already set, then push it with a write anyhow.
	 */
	vfs_busy_pages(bp, 1);
	if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) {
		off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		bp->b_flags |= B_WRITEINPROG;
		retv = nfs_commit(bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
			bp->b_wcred, bp->b_proc);
		bp->b_flags &= ~B_WRITEINPROG;
		if (!retv) {
			/* commit succeeded: buffer is clean, no write needed */
			bp->b_dirtyoff = bp->b_dirtyend = 0;
			bp->b_flags &= ~B_NEEDCOMMIT;
			biodone(bp);
		} else if (retv == NFSERR_STALEWRITEVERF)
			nfs_clearcommit(bp->b_vp->v_mount);
	}
	if (retv) {
		/* commit not attempted or failed: do a real write */
		if (force)
			bp->b_flags |= B_WRITEINPROG;
		VOP_STRATEGY(bp);
	}

	if( (oldflags & B_ASYNC) == 0) {
		/* synchronous caller: wait for the I/O and return its status */
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			s = splbio();
			reassignbuf(bp, bp->b_vp);
			splx(s);
		}

		brelse(bp);
		return (rtval);
	}

	return (0);
}

/*
 * nfs special file access vnode op.
 * Essentially just get vattr and then imitate iaccess() since the device is
 * local to the client.
 */
static int
nfsspec_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vattr *vap;
	register gid_t *gp;
	register struct ucred *cred = ap->a_cred;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;
	struct vattr vattr;
	register int i;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	/*
	 * If you're the super-user,
	 * you always get access.
	 */
	if (cred->cr_uid == 0)
		return (0);
	vap = &vattr;
	error = VOP_GETATTR(vp, vap, cred, ap->a_p);
	if (error)
		return (error);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group. If not a member of the
	 * group, then check public access.
	 *
	 * Each shift moves the mode mask from the owner bits to the
	 * group bits, then to the "other" bits; the found: label skips
	 * the second shift when a matching group is found.
	 */
	if (cred->cr_uid != vap->va_uid) {
		mode >>= 3;
		gp = cred->cr_groups;
		for (i = 0; i < cred->cr_ngroups; i++, gp++)
			if (vap->va_gid == *gp)
				goto found;
		mode >>= 3;
found:
		;
	}
	error = (vap->va_mode & mode) == mode ? 0 : EACCES;
	return (error);
}

/*
 * Read wrapper for special devices.
 *
 * Mark the nfsnode accessed, then pass the call through to the
 * underlying spec vnode op.
 */
static int
nfsspec_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * Set access flag.
	 */
	np->n_flag |= NACC;
	getnanotime(&np->n_atim);
	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
}

/*
 * Write wrapper for special devices.
 *
 * Mark the nfsnode updated, then pass the call through to the
 * underlying spec vnode op.
 */
static int
nfsspec_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * Set update flag.
	 */
	np->n_flag |= NUPD;
	getnanotime(&np->n_mtim);
	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
}

/*
 * Close wrapper for special devices.
 *
 * Update the times on the nfsnode then do device close.
 */
static int
nfsspec_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;

	if (np->n_flag & (NACC | NUPD)) {
		np->n_flag |= NCHG;
		/* push times to the server only on last close of a rw mount */
		if (vp->v_usecount == 1 &&
		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			VATTR_NULL(&vattr);
			if (np->n_flag & NACC)
				vattr.va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vattr.va_mtime = np->n_mtim;
			(void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
		}
	}
	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Read wrapper for fifos.
 *
 * Mark the nfsnode accessed, then pass the call through to the
 * underlying fifo vnode op.
 */
static int
nfsfifo_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * Set access flag.
	 */
	np->n_flag |= NACC;
	getnanotime(&np->n_atim);
	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
}

/*
 * Write wrapper for fifos.
 *
 * Mark the nfsnode updated, then pass the call through to the
 * underlying fifo vnode op.
 */
static int
nfsfifo_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * Set update flag.
	 */
	np->n_flag |= NUPD;
	getnanotime(&np->n_mtim);
	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
}

/*
 * Close wrapper for fifos.
 *
 * Update the times on the nfsnode then do fifo close.
 */
static int
nfsfifo_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	struct timespec ts;

	if (np->n_flag & (NACC | NUPD)) {
		/* stamp the cached times now; fifos have no device clock */
		getnanotime(&ts);
		if (np->n_flag & NACC)
			np->n_atim = ts;
		if (np->n_flag & NUPD)
			np->n_mtim = ts;
		np->n_flag |= NCHG;
		/* push times to the server only on last close of a rw mount */
		if (vp->v_usecount == 1 &&
		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			VATTR_NULL(&vattr);
			if (np->n_flag & NACC)
				vattr.va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vattr.va_mtime = np->n_mtim;
			(void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
		}
	}
	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
}