nfs_vnops.c revision 36522
1/* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95 37 * $Id: nfs_vnops.c,v 1.94 1998/05/31 18:23:24 peter Exp $ 38 */ 39 40 41/* 42 * vnode op calls for Sun NFS version 2 and 3 43 */ 44 45#include "opt_inet.h" 46 47#include <sys/param.h> 48#include <sys/kernel.h> 49#include <sys/systm.h> 50#include <sys/resourcevar.h> 51#include <sys/proc.h> 52#include <sys/mount.h> 53#include <sys/buf.h> 54#include <sys/malloc.h> 55#include <sys/mbuf.h> 56#include <sys/namei.h> 57#include <sys/socket.h> 58#include <sys/vnode.h> 59#include <sys/dirent.h> 60#include <sys/fcntl.h> 61#include <sys/lockf.h> 62 63#include <vm/vm.h> 64#include <vm/vm_extern.h> 65#include <vm/vm_zone.h> 66 67#include <miscfs/fifofs/fifo.h> 68#include <miscfs/specfs/specdev.h> 69 70#include <nfs/rpcv2.h> 71#include <nfs/nfsproto.h> 72#include <nfs/nfs.h> 73#include <nfs/nfsnode.h> 74#include <nfs/nfsmount.h> 75#include <nfs/xdr_subs.h> 76#include <nfs/nfsm_subs.h> 77#include <nfs/nqnfs.h> 78 79#include <net/if.h> 80#include <netinet/in.h> 81#include <netinet/in_var.h> 82 83/* Defs */ 84#define TRUE 1 85#define FALSE 0 86 87/* 88 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these 89 * calls are not in getblk() and brelse() so that they would not be necessary 90 * here. 
91 */ 92#ifndef B_VMIO 93#define vfs_busy_pages(bp, f) 94#endif 95 96static int nfsspec_read __P((struct vop_read_args *)); 97static int nfsspec_write __P((struct vop_write_args *)); 98static int nfsfifo_read __P((struct vop_read_args *)); 99static int nfsfifo_write __P((struct vop_write_args *)); 100static int nfsspec_close __P((struct vop_close_args *)); 101static int nfsfifo_close __P((struct vop_close_args *)); 102#define nfs_poll vop_nopoll 103static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int)); 104static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *)); 105static int nfs_lookup __P((struct vop_lookup_args *)); 106static int nfs_create __P((struct vop_create_args *)); 107static int nfs_mknod __P((struct vop_mknod_args *)); 108static int nfs_open __P((struct vop_open_args *)); 109static int nfs_close __P((struct vop_close_args *)); 110static int nfs_access __P((struct vop_access_args *)); 111static int nfs_getattr __P((struct vop_getattr_args *)); 112static int nfs_setattr __P((struct vop_setattr_args *)); 113static int nfs_read __P((struct vop_read_args *)); 114static int nfs_mmap __P((struct vop_mmap_args *)); 115static int nfs_fsync __P((struct vop_fsync_args *)); 116static int nfs_remove __P((struct vop_remove_args *)); 117static int nfs_link __P((struct vop_link_args *)); 118static int nfs_rename __P((struct vop_rename_args *)); 119static int nfs_mkdir __P((struct vop_mkdir_args *)); 120static int nfs_rmdir __P((struct vop_rmdir_args *)); 121static int nfs_symlink __P((struct vop_symlink_args *)); 122static int nfs_readdir __P((struct vop_readdir_args *)); 123static int nfs_bmap __P((struct vop_bmap_args *)); 124static int nfs_strategy __P((struct vop_strategy_args *)); 125static int nfs_lookitup __P((struct vnode *, const char *, int, 126 struct ucred *, struct proc *, struct nfsnode **)); 127static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *)); 128static int 
nfsspec_access __P((struct vop_access_args *));
static int	nfs_readlink __P((struct vop_readlink_args *));
static int	nfs_print __P((struct vop_print_args *));
static int	nfs_advlock __P((struct vop_advlock_args *));
static int	nfs_bwrite __P((struct vop_bwrite_args *));

/*
 * Global vfs data structures for nfs
 */
vop_t **nfsv2_vnodeop_p;
static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_abortop_desc,		(vop_t *) nfs_abortop },
	{ &vop_access_desc,		(vop_t *) nfs_access },
	{ &vop_advlock_desc,		(vop_t *) nfs_advlock },
	{ &vop_bmap_desc,		(vop_t *) nfs_bmap },
	{ &vop_bwrite_desc,		(vop_t *) nfs_bwrite },
	{ &vop_close_desc,		(vop_t *) nfs_close },
	{ &vop_create_desc,		(vop_t *) nfs_create },
	{ &vop_fsync_desc,		(vop_t *) nfs_fsync },
	{ &vop_getattr_desc,		(vop_t *) nfs_getattr },
	{ &vop_getpages_desc,		(vop_t *) nfs_getpages },
	{ &vop_putpages_desc,		(vop_t *) nfs_putpages },
	{ &vop_inactive_desc,		(vop_t *) nfs_inactive },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_link_desc,		(vop_t *) nfs_link },
	{ &vop_lock_desc,		(vop_t *) vop_sharedlock },
	{ &vop_lookup_desc,		(vop_t *) nfs_lookup },
	{ &vop_mkdir_desc,		(vop_t *) nfs_mkdir },
	{ &vop_mknod_desc,		(vop_t *) nfs_mknod },
	{ &vop_mmap_desc,		(vop_t *) nfs_mmap },
	{ &vop_open_desc,		(vop_t *) nfs_open },
	{ &vop_poll_desc,		(vop_t *) nfs_poll },
	{ &vop_print_desc,		(vop_t *) nfs_print },
	{ &vop_read_desc,		(vop_t *) nfs_read },
	{ &vop_readdir_desc,		(vop_t *) nfs_readdir },
	{ &vop_readlink_desc,		(vop_t *) nfs_readlink },
	{ &vop_reclaim_desc,		(vop_t *) nfs_reclaim },
	{ &vop_remove_desc,		(vop_t *) nfs_remove },
	{ &vop_rename_desc,		(vop_t *) nfs_rename },
	{ &vop_rmdir_desc,		(vop_t *) nfs_rmdir },
	{ &vop_setattr_desc,		(vop_t *) nfs_setattr },
	{ &vop_strategy_desc,		(vop_t *) nfs_strategy },
	{ &vop_symlink_desc,		(vop_t *) nfs_symlink },
	{ &vop_write_desc,		(vop_t *) nfs_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
VNODEOP_SET(nfsv2_vnodeop_opv_desc);

/*
 * Special device vnode ops
 * (unlisted ops fall through to specfs via spec_vnoperate)
 */
vop_t **spec_nfsv2nodeop_p;
static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
	{ &vop_default_desc,		(vop_t *) spec_vnoperate },
	{ &vop_access_desc,		(vop_t *) nfsspec_access },
	{ &vop_close_desc,		(vop_t *) nfsspec_close },
	{ &vop_fsync_desc,		(vop_t *) nfs_fsync },
	{ &vop_getattr_desc,		(vop_t *) nfs_getattr },
	{ &vop_inactive_desc,		(vop_t *) nfs_inactive },
	{ &vop_lock_desc,		(vop_t *) vop_sharedlock },
	{ &vop_print_desc,		(vop_t *) nfs_print },
	{ &vop_read_desc,		(vop_t *) nfsspec_read },
	{ &vop_reclaim_desc,		(vop_t *) nfs_reclaim },
	{ &vop_setattr_desc,		(vop_t *) nfs_setattr },
	{ &vop_write_desc,		(vop_t *) nfsspec_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
	{ &spec_nfsv2nodeop_p, nfsv2_specop_entries };
VNODEOP_SET(spec_nfsv2nodeop_opv_desc);

/*
 * Fifo vnode ops
 * (unlisted ops fall through to fifofs via fifo_vnoperate)
 */
vop_t **fifo_nfsv2nodeop_p;
static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
	{ &vop_default_desc,		(vop_t *) fifo_vnoperate },
	{ &vop_access_desc,		(vop_t *) nfsspec_access },
	{ &vop_close_desc,		(vop_t *) nfsfifo_close },
	{ &vop_fsync_desc,		(vop_t *) nfs_fsync },
	{ &vop_getattr_desc,		(vop_t *) nfs_getattr },
	{ &vop_inactive_desc,		(vop_t *) nfs_inactive },
	{ &vop_lock_desc,		(vop_t *) vop_sharedlock },
	{ &vop_print_desc,		(vop_t *) nfs_print },
	{ &vop_read_desc,		(vop_t *) nfsfifo_read },
	{ &vop_reclaim_desc,		(vop_t *) nfs_reclaim },
	{ &vop_setattr_desc,		(vop_t *) nfs_setattr },
	{ &vop_write_desc,		(vop_t *) nfsfifo_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
	{ &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
219VNODEOP_SET(fifo_nfsv2nodeop_opv_desc); 220 221static int nfs_commit __P((struct vnode *vp, u_quad_t offset, int cnt, 222 struct ucred *cred, struct proc *procp)); 223static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp, 224 struct componentname *cnp, 225 struct vattr *vap)); 226static int nfs_removerpc __P((struct vnode *dvp, const char *name, 227 int namelen, 228 struct ucred *cred, struct proc *proc)); 229static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr, 230 int fnamelen, struct vnode *tdvp, 231 const char *tnameptr, int tnamelen, 232 struct ucred *cred, struct proc *proc)); 233static int nfs_renameit __P((struct vnode *sdvp, 234 struct componentname *scnp, 235 struct sillyrename *sp)); 236 237/* 238 * Global variables 239 */ 240extern u_long nfs_true, nfs_false; 241extern struct nfsstats nfsstats; 242extern nfstype nfsv3_type[9]; 243struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON]; 244struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON]; 245int nfs_numasync = 0; 246#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1)) 247 248/* 249 * nfs access vnode op. 250 * For nfs version 2, just return ok. File accesses may fail later. 251 * For nfs version 3, use the access rpc to check accessibility. If file modes 252 * are changed on the server, accesses might still fail later. 253 */ 254static int 255nfs_access(ap) 256 struct vop_access_args /* { 257 struct vnode *a_vp; 258 int a_mode; 259 struct ucred *a_cred; 260 struct proc *a_p; 261 } */ *ap; 262{ 263 register struct vnode *vp = ap->a_vp; 264 register u_long *tl; 265 register caddr_t cp; 266 register int t1, t2; 267 caddr_t bpos, dpos, cp2; 268 int error = 0, attrflag; 269 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 270 u_long mode, rmode; 271 int v3 = NFS_ISV3(vp); 272 273 /* 274 * Disallow write attempts on filesystems mounted read-only; 275 * unless the file is a socket, fifo, or a block or character 276 * device resident on the filesystem. 
277 */ 278 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 279 switch (vp->v_type) { 280 case VREG: 281 case VDIR: 282 case VLNK: 283 return (EROFS); 284 default: 285 break; 286 } 287 } 288 /* 289 * For nfs v3, do an access rpc, otherwise you are stuck emulating 290 * ufs_access() locally using the vattr. This may not be correct, 291 * since the server may apply other access criteria such as 292 * client uid-->server uid mapping that we do not know about, but 293 * this is better than just returning anything that is lying about 294 * in the cache. 295 */ 296 if (v3) { 297 nfsstats.rpccnt[NFSPROC_ACCESS]++; 298 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED); 299 nfsm_fhtom(vp, v3); 300 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 301 if (ap->a_mode & VREAD) 302 mode = NFSV3ACCESS_READ; 303 else 304 mode = 0; 305 if (vp->v_type != VDIR) { 306 if (ap->a_mode & VWRITE) 307 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 308 if (ap->a_mode & VEXEC) 309 mode |= NFSV3ACCESS_EXECUTE; 310 } else { 311 if (ap->a_mode & VWRITE) 312 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 313 NFSV3ACCESS_DELETE); 314 if (ap->a_mode & VEXEC) 315 mode |= NFSV3ACCESS_LOOKUP; 316 } 317 *tl = txdr_unsigned(mode); 318 nfsm_request(vp, NFSPROC_ACCESS, ap->a_p, ap->a_cred); 319 nfsm_postop_attr(vp, attrflag); 320 if (!error) { 321 nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); 322 rmode = fxdr_unsigned(u_long, *tl); 323 /* 324 * The NFS V3 spec does not clarify whether or not 325 * the returned access bits can be a superset of 326 * the ones requested, so... 327 */ 328 if ((rmode & mode) != mode) 329 error = EACCES; 330 } 331 nfsm_reqdone; 332 return (error); 333 } else { 334 if (error = nfsspec_access(ap)) 335 return (error); 336 337 /* 338 * Attempt to prevent a mapped root from accessing a file 339 * which it shouldn't. We try to read a byte from the file 340 * if the user is root and the file is not zero length. 
341 * After calling nfsspec_access, we should have the correct 342 * file size cached. 343 */ 344 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD) 345 && VTONFS(vp)->n_size > 0) { 346 struct iovec aiov; 347 struct uio auio; 348 char buf[1]; 349 350 aiov.iov_base = buf; 351 aiov.iov_len = 1; 352 auio.uio_iov = &aiov; 353 auio.uio_iovcnt = 1; 354 auio.uio_offset = 0; 355 auio.uio_resid = 1; 356 auio.uio_segflg = UIO_SYSSPACE; 357 auio.uio_rw = UIO_READ; 358 auio.uio_procp = ap->a_p; 359 360 if (vp->v_type == VREG) 361 error = nfs_readrpc(vp, &auio, ap->a_cred); 362 else if (vp->v_type == VDIR) { 363 char* bp; 364 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK); 365 aiov.iov_base = bp; 366 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ; 367 error = nfs_readdirrpc(vp, &auio, ap->a_cred); 368 free(bp, M_TEMP); 369 } else if (vp->v_type = VLNK) 370 error = nfs_readlinkrpc(vp, &auio, ap->a_cred); 371 else 372 error = EACCES; 373 } 374 return (error); 375 } 376} 377 378/* 379 * nfs open vnode op 380 * Check to see if the type is ok 381 * and that deletion is not in progress. 382 * For paged in text files, you will need to flush the page cache 383 * if consistency is lost. 384 */ 385/* ARGSUSED */ 386static int 387nfs_open(ap) 388 struct vop_open_args /* { 389 struct vnode *a_vp; 390 int a_mode; 391 struct ucred *a_cred; 392 struct proc *a_p; 393 } */ *ap; 394{ 395 register struct vnode *vp = ap->a_vp; 396 struct nfsnode *np = VTONFS(vp); 397 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 398 struct vattr vattr; 399 int error; 400 401 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 402#ifdef DIAGNOSTIC 403 printf("open eacces vtyp=%d\n",vp->v_type); 404#endif 405 return (EACCES); 406 } 407 /* 408 * Get a valid lease. If cached data is stale, flush it. 
	 */
	if (nmp->nm_flag & NFSMNT_NQNFS) {
		/* NQNFS: leases keep the cache consistent; renew if invalid. */
		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
		    do {
			error = nqnfs_getlease(vp, ND_READ, ap->a_cred,
			    ap->a_p);
		    } while (error == NQNFS_EXPIRED);
		    if (error)
			return (error);
		    if (np->n_lrev != np->n_brev ||
			(np->n_flag & NQNFSNONCACHE)) {
			if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
				ap->a_p, 1)) == EINTR)
				return (error);
			np->n_brev = np->n_lrev;
		    }
		}
	} else {
		/*
		 * Plain NFS: detect server-side changes by comparing the
		 * cached modify time against a fresh GETATTR, flushing
		 * cached buffers when they differ.
		 */
		if (np->n_flag & NMODIFIED) {
			if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
				ap->a_p, 1)) == EINTR)
				return (error);
			np->n_attrstamp = 0;
			if (vp->v_type == VDIR)
				np->n_direofoffset = 0;
			error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR)
					np->n_direofoffset = 0;
				if ((error = nfs_vinvalbuf(vp, V_SAVE,
					ap->a_cred, ap->a_p, 1)) == EINTR)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
		np->n_attrstamp = 0; /* For Open/Close consistency */
	return (0);
}

/*
 * nfs close vnode op
 * What an NFS client should do upon close after writing is a debatable issue.
 * Most NFS clients push delayed writes to the server upon close, basically for
 * two reasons:
 * 1 - So that any write errors may be reported back to the client process
 *     doing the close system call. By far the two most likely errors are
 *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
 * 2 - To put a worst case upper bound on cache inconsistency between
 *     multiple clients for the file.
 * There is also a consistency problem for Version 2 of the protocol w.r.t.
 * not being able to tell if other clients are writing a file concurrently,
 * since there is no way of knowing if the changed modify time in the reply
 * is only due to the write for this client.
 * (NFS Version 3 provides weak cache consistency data in the reply that
 *  should be sufficient to detect and handle this case.)
 *
 * The current code does the following:
 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
 *                     or commit them (this satisfies 1 and 2 except for the
 *                     case where the server crashes after this close but
 *                     before the commit RPC, which is felt to be "good
 *                     enough". Changing the last argument to nfs_flush() to
 *                     a 1 would force a commit operation, if it is felt a
 *                     commit is necessary now.
 * for NQNFS         - do nothing now, since 2 is dealt with via leases and
 *                     1 should be dealt with via an fsync() system call for
 *                     cases where write errors are important.
 */
/* ARGSUSED */
static int
nfs_close(ap)
	struct vop_close_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);
	int error = 0;

	if (vp->v_type == VREG) {
	    if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
		(np->n_flag & NMODIFIED)) {
		if (NFS_ISV3(vp)) {
		    /* v3: flush but don't invalidate/commit (see above). */
		    error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0);
		    np->n_flag &= ~NMODIFIED;
		} else
		    error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
		np->n_attrstamp = 0;
	    }
	    /* Report any deferred write error back to this close(). */
	    if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		error = np->n_error;
	    }
	}
	return (error);
}

/*
 * nfs getattr call from vfs.
522 */ 523static int 524nfs_getattr(ap) 525 struct vop_getattr_args /* { 526 struct vnode *a_vp; 527 struct vattr *a_vap; 528 struct ucred *a_cred; 529 struct proc *a_p; 530 } */ *ap; 531{ 532 register struct vnode *vp = ap->a_vp; 533 register struct nfsnode *np = VTONFS(vp); 534 register caddr_t cp; 535 register u_long *tl; 536 register int t1, t2; 537 caddr_t bpos, dpos; 538 int error = 0; 539 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 540 int v3 = NFS_ISV3(vp); 541 542 /* 543 * Update local times for special files. 544 */ 545 if (np->n_flag & (NACC | NUPD)) 546 np->n_flag |= NCHG; 547 /* 548 * First look in the cache. 549 */ 550 if (nfs_getattrcache(vp, ap->a_vap) == 0) 551 return (0); 552 nfsstats.rpccnt[NFSPROC_GETATTR]++; 553 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3)); 554 nfsm_fhtom(vp, v3); 555 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred); 556 if (!error) { 557 nfsm_loadattr(vp, ap->a_vap); 558 } 559 nfsm_reqdone; 560 return (error); 561} 562 563/* 564 * nfs setattr call. 565 */ 566static int 567nfs_setattr(ap) 568 struct vop_setattr_args /* { 569 struct vnodeop_desc *a_desc; 570 struct vnode *a_vp; 571 struct vattr *a_vap; 572 struct ucred *a_cred; 573 struct proc *a_p; 574 } */ *ap; 575{ 576 register struct vnode *vp = ap->a_vp; 577 register struct nfsnode *np = VTONFS(vp); 578 register struct vattr *vap = ap->a_vap; 579 int error = 0; 580 u_quad_t tsize; 581 582#ifndef nolint 583 tsize = (u_quad_t)0; 584#endif 585 /* 586 * Disallow write attempts if the filesystem is mounted read-only. 
587 */ 588 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || 589 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 590 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 591 (vp->v_mount->mnt_flag & MNT_RDONLY)) 592 return (EROFS); 593 if (vap->va_size != VNOVAL) { 594 switch (vp->v_type) { 595 case VDIR: 596 return (EISDIR); 597 case VCHR: 598 case VBLK: 599 case VSOCK: 600 case VFIFO: 601 if (vap->va_mtime.tv_sec == VNOVAL && 602 vap->va_atime.tv_sec == VNOVAL && 603 vap->va_mode == (u_short)VNOVAL && 604 vap->va_uid == (uid_t)VNOVAL && 605 vap->va_gid == (gid_t)VNOVAL) 606 return (0); 607 vap->va_size = VNOVAL; 608 break; 609 default: 610 /* 611 * Disallow write attempts if the filesystem is 612 * mounted read-only. 613 */ 614 if (vp->v_mount->mnt_flag & MNT_RDONLY) 615 return (EROFS); 616 if (np->n_flag & NMODIFIED) { 617 if (vap->va_size == 0) 618 error = nfs_vinvalbuf(vp, 0, 619 ap->a_cred, ap->a_p, 1); 620 else 621 error = nfs_vinvalbuf(vp, V_SAVE, 622 ap->a_cred, ap->a_p, 1); 623 if (error) 624 return (error); 625 } 626 tsize = np->n_size; 627 np->n_size = np->n_vattr.va_size = vap->va_size; 628 vnode_pager_setsize(vp, (u_long)np->n_size); 629 }; 630 } else if ((vap->va_mtime.tv_sec != VNOVAL || 631 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) && 632 vp->v_type == VREG && 633 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 634 ap->a_p, 1)) == EINTR) 635 return (error); 636 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); 637 if (error && vap->va_size != VNOVAL) { 638 np->n_size = np->n_vattr.va_size = tsize; 639 vnode_pager_setsize(vp, (u_long)np->n_size); 640 } 641 return (error); 642} 643 644/* 645 * Do an nfs setattr rpc. 
646 */ 647static int 648nfs_setattrrpc(vp, vap, cred, procp) 649 register struct vnode *vp; 650 register struct vattr *vap; 651 struct ucred *cred; 652 struct proc *procp; 653{ 654 register struct nfsv2_sattr *sp; 655 register caddr_t cp; 656 register long t1, t2; 657 caddr_t bpos, dpos, cp2; 658 u_long *tl; 659 int error = 0, wccflag = NFSV3_WCCRATTR; 660 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 661 int v3 = NFS_ISV3(vp); 662 663 nfsstats.rpccnt[NFSPROC_SETATTR]++; 664 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3)); 665 nfsm_fhtom(vp, v3); 666 if (v3) { 667 if (vap->va_mode != (u_short)VNOVAL) { 668 nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); 669 *tl++ = nfs_true; 670 *tl = txdr_unsigned(vap->va_mode); 671 } else { 672 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 673 *tl = nfs_false; 674 } 675 if (vap->va_uid != (uid_t)VNOVAL) { 676 nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); 677 *tl++ = nfs_true; 678 *tl = txdr_unsigned(vap->va_uid); 679 } else { 680 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 681 *tl = nfs_false; 682 } 683 if (vap->va_gid != (gid_t)VNOVAL) { 684 nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); 685 *tl++ = nfs_true; 686 *tl = txdr_unsigned(vap->va_gid); 687 } else { 688 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 689 *tl = nfs_false; 690 } 691 if (vap->va_size != VNOVAL) { 692 nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); 693 *tl++ = nfs_true; 694 txdr_hyper(&vap->va_size, tl); 695 } else { 696 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 697 *tl = nfs_false; 698 } 699 if (vap->va_atime.tv_sec != VNOVAL) { 700 if (vap->va_atime.tv_sec != time_second) { 701 nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); 702 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); 703 txdr_nfsv3time(&vap->va_atime, tl); 704 } else { 705 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 706 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); 707 } 708 } else { 709 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 710 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); 711 } 712 if (vap->va_mtime.tv_sec 
!= VNOVAL) { 713 if (vap->va_mtime.tv_sec != time_second) { 714 nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); 715 *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); 716 txdr_nfsv3time(&vap->va_mtime, tl); 717 } else { 718 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 719 *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); 720 } 721 } else { 722 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 723 *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); 724 } 725 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 726 *tl = nfs_false; 727 } else { 728 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 729 if (vap->va_mode == (u_short)VNOVAL) 730 sp->sa_mode = VNOVAL; 731 else 732 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 733 if (vap->va_uid == (uid_t)VNOVAL) 734 sp->sa_uid = VNOVAL; 735 else 736 sp->sa_uid = txdr_unsigned(vap->va_uid); 737 if (vap->va_gid == (gid_t)VNOVAL) 738 sp->sa_gid = VNOVAL; 739 else 740 sp->sa_gid = txdr_unsigned(vap->va_gid); 741 sp->sa_size = txdr_unsigned(vap->va_size); 742 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 743 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 744 } 745 nfsm_request(vp, NFSPROC_SETATTR, procp, cred); 746 if (v3) { 747 nfsm_wcc_data(vp, wccflag); 748 } else 749 nfsm_loadattr(vp, (struct vattr *)0); 750 nfsm_reqdone; 751 return (error); 752} 753 754/* 755 * nfs lookup call, one step at a time... 
 * First look in cache
 * If not found, unlock the directory nfsnode and do the rpc
 */
static int
nfs_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	int flags = cnp->cn_flags;
	struct vnode *newvp;
	u_long *tl;
	caddr_t cp;
	long t1, t2;
	struct nfsmount *nmp;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	long len;
	nfsfh_t *fhp;
	struct nfsnode *np;
	int lockparent, wantparent, error = 0, attrflag, fhsize;
	int v3 = NFS_ISV3(dvp);
	struct proc *p = cnp->cn_proc;

	*vpp = NULLVP;
	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);
	lockparent = flags & LOCKPARENT;
	wantparent = flags & (LOCKPARENT|WANTPARENT);
	nmp = VFSTONFS(dvp->v_mount);
	np = VTONFS(dvp);
	/*
	 * Try the name cache first.  A hit is only trusted if a fresh
	 * GETATTR shows the cached ctime still matches.
	 */
	if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
		struct vattr vattr;
		int vpid;

		if (error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) {
			*vpp = NULLVP;
			return (error);
		}

		newvp = *vpp;
		vpid = newvp->v_id;
		/*
		 * See the comment starting `Step through' in ufs/ufs_lookup.c
		 * for an explanation of the locking protocol
		 */
		if (dvp == newvp) {
			VREF(newvp);
			error = 0;
		} else if (flags & ISDOTDOT) {
			/* Unlock child-before-parent to avoid deadlock. */
			VOP_UNLOCK(dvp, 0, p);
			error = vget(newvp, LK_EXCLUSIVE, p);
			if (!error && lockparent && (flags & ISLASTCN))
				error = vn_lock(dvp, LK_EXCLUSIVE, p);
		} else {
			error = vget(newvp, LK_EXCLUSIVE, p);
			if (!lockparent || error || !(flags & ISLASTCN))
				VOP_UNLOCK(dvp, 0, p);
		}
		if (!error) {
			/* v_id unchanged => vnode was not recycled while
			 * we slept in vget(). */
			if (vpid == newvp->v_id) {
			   if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
			    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
				nfsstats.lookupcache_hits++;
				if (cnp->cn_nameiop != LOOKUP &&
				    (flags & ISLASTCN))
					cnp->cn_flags |= SAVENAME;
				return (0);
			   }
			   cache_purge(newvp);
			}
			vput(newvp);
			if (lockparent && dvp != newvp && (flags & ISLASTCN))
				VOP_UNLOCK(dvp, 0, p);
		}
		/* Cache entry was stale: relock dvp and fall through to
		 * an over-the-wire LOOKUP. */
		error = vn_lock(dvp, LK_EXCLUSIVE, p);
		*vpp = NULLVP;
		if (error)
			return (error);
	}
	error = 0;
	newvp = NULLVP;
	nfsstats.lookupcache_misses++;
	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
	len = cnp->cn_namelen;
	nfsm_reqhead(dvp, NFSPROC_LOOKUP,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred);
	if (error) {
		nfsm_postop_attr(dvp, attrflag);
		m_freem(mrep);
		goto nfsmout;
	}
	nfsm_getfh(fhp, fhsize, v3);

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, fhp, fhsize)) {
			m_freem(mrep);
			return (EISDIR);
		}
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return (error);
		}
		newvp = NFSTOV(np);
		if (v3) {
			nfsm_postop_attr(newvp, attrflag);
			nfsm_postop_attr(dvp, attrflag);
		} else
			nfsm_loadattr(newvp, (struct vattr *)0);
		*vpp = newvp;
		m_freem(mrep);
		cnp->cn_flags |= SAVENAME;
		if (!lockparent)
			VOP_UNLOCK(dvp, 0, p);
		return (0);
	}

	if (flags & ISDOTDOT) {
		/* ".." : unlock dvp across nfs_nget to preserve lock order. */
		VOP_UNLOCK(dvp, 0, p);
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
			return (error);
		}
		newvp = NFSTOV(np);
		if (lockparent && (flags & ISLASTCN) &&
		    (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
			vput(newvp);
			return (error);
		}
	} else if (NFS_CMPFH(np, fhp, fhsize)) {
		/* "." : same file handle as the directory itself. */
		VREF(dvp);
		newvp = dvp;
	} else {
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return (error);
		}
		if (!lockparent || !(flags & ISLASTCN))
			VOP_UNLOCK(dvp, 0, p);
		newvp = NFSTOV(np);
	}
	if (v3) {
		nfsm_postop_attr(newvp, attrflag);
		nfsm_postop_attr(dvp, attrflag);
	} else
		nfsm_loadattr(newvp, (struct vattr *)0);
	if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
		cnp->cn_flags |= SAVENAME;
	if ((cnp->cn_flags & MAKEENTRY) &&
	    (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
		np->n_ctime = np->n_vattr.va_ctime.tv_sec;
		cache_enter(dvp, newvp, cnp);
	}
	*vpp = newvp;
	nfsm_reqdone;
	if (error) {
		if (newvp != NULLVP) {
			vrele(newvp);
			*vpp = NULLVP;
		}
		/* A failed last-component lookup for CREATE/RENAME is the
		 * normal "target does not exist yet" case. */
		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
		    (flags & ISLASTCN) && error == ENOENT) {
			if (!lockparent)
				VOP_UNLOCK(dvp, 0, p);
			if (dvp->v_mount->mnt_flag & MNT_RDONLY)
				error = EROFS;
			else
				error = EJUSTRETURN;
		}
		if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
			cnp->cn_flags |= SAVENAME;
	}
	return (error);
}

/*
 * nfs read call.
 * Just call nfs_bioread() to do the work.
 */
static int
nfs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	if (vp->v_type != VREG)
		return (EPERM);
	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
}

/*
 * nfs readlink call
 */
static int
nfs_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	if (vp->v_type != VLNK)
		return (EPERM);
	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
}

/*
 * Do a readlink rpc.
 * Called by nfs_doio() from below the buffer cache.
990 */ 991int 992nfs_readlinkrpc(vp, uiop, cred) 993 register struct vnode *vp; 994 struct uio *uiop; 995 struct ucred *cred; 996{ 997 register u_long *tl; 998 register caddr_t cp; 999 register long t1, t2; 1000 caddr_t bpos, dpos, cp2; 1001 int error = 0, len, attrflag; 1002 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1003 int v3 = NFS_ISV3(vp); 1004 1005 nfsstats.rpccnt[NFSPROC_READLINK]++; 1006 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3)); 1007 nfsm_fhtom(vp, v3); 1008 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred); 1009 if (v3) 1010 nfsm_postop_attr(vp, attrflag); 1011 if (!error) { 1012 nfsm_strsiz(len, NFS_MAXPATHLEN); 1013 nfsm_mtouio(uiop, len); 1014 } 1015 nfsm_reqdone; 1016 return (error); 1017} 1018 1019/* 1020 * nfs read rpc call 1021 * Ditto above 1022 */ 1023int 1024nfs_readrpc(vp, uiop, cred) 1025 register struct vnode *vp; 1026 struct uio *uiop; 1027 struct ucred *cred; 1028{ 1029 register u_long *tl; 1030 register caddr_t cp; 1031 register long t1, t2; 1032 caddr_t bpos, dpos, cp2; 1033 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1034 struct nfsmount *nmp; 1035 int error = 0, len, retlen, tsiz, eof, attrflag; 1036 int v3 = NFS_ISV3(vp); 1037 1038#ifndef nolint 1039 eof = 0; 1040#endif 1041 nmp = VFSTONFS(vp->v_mount); 1042 tsiz = uiop->uio_resid; 1043 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) 1044 return (EFBIG); 1045 while (tsiz > 0) { 1046 nfsstats.rpccnt[NFSPROC_READ]++; 1047 len = (tsiz > nmp->nm_rsize) ? 
nmp->nm_rsize : tsiz; 1048 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3); 1049 nfsm_fhtom(vp, v3); 1050 nfsm_build(tl, u_long *, NFSX_UNSIGNED * 3); 1051 if (v3) { 1052 txdr_hyper(&uiop->uio_offset, tl); 1053 *(tl + 2) = txdr_unsigned(len); 1054 } else { 1055 *tl++ = txdr_unsigned(uiop->uio_offset); 1056 *tl++ = txdr_unsigned(len); 1057 *tl = 0; 1058 } 1059 nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred); 1060 if (v3) { 1061 nfsm_postop_attr(vp, attrflag); 1062 if (error) { 1063 m_freem(mrep); 1064 goto nfsmout; 1065 } 1066 nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); 1067 eof = fxdr_unsigned(int, *(tl + 1)); 1068 } else 1069 nfsm_loadattr(vp, (struct vattr *)0); 1070 nfsm_strsiz(retlen, nmp->nm_rsize); 1071 nfsm_mtouio(uiop, retlen); 1072 m_freem(mrep); 1073 tsiz -= retlen; 1074 if (v3) { 1075 if (eof || retlen == 0) 1076 tsiz = 0; 1077 } else if (retlen < len) 1078 tsiz = 0; 1079 } 1080nfsmout: 1081 return (error); 1082} 1083 1084/* 1085 * nfs write call 1086 */ 1087int 1088nfs_writerpc(vp, uiop, cred, iomode, must_commit) 1089 register struct vnode *vp; 1090 register struct uio *uiop; 1091 struct ucred *cred; 1092 int *iomode, *must_commit; 1093{ 1094 register u_long *tl; 1095 register caddr_t cp; 1096 register int t1, t2, backup; 1097 caddr_t bpos, dpos, cp2; 1098 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1099 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1100 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit; 1101 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC; 1102 1103#ifndef DIAGNOSTIC 1104 if (uiop->uio_iovcnt != 1) 1105 panic("nfs: writerpc iovcnt > 1"); 1106#endif 1107 *must_commit = 0; 1108 tsiz = uiop->uio_resid; 1109 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) 1110 return (EFBIG); 1111 while (tsiz > 0) { 1112 nfsstats.rpccnt[NFSPROC_WRITE]++; 1113 len = (tsiz > nmp->nm_wsize) ? 
nmp->nm_wsize : tsiz; 1114 nfsm_reqhead(vp, NFSPROC_WRITE, 1115 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len)); 1116 nfsm_fhtom(vp, v3); 1117 if (v3) { 1118 nfsm_build(tl, u_long *, 5 * NFSX_UNSIGNED); 1119 txdr_hyper(&uiop->uio_offset, tl); 1120 tl += 2; 1121 *tl++ = txdr_unsigned(len); 1122 *tl++ = txdr_unsigned(*iomode); 1123 } else { 1124 nfsm_build(tl, u_long *, 4 * NFSX_UNSIGNED); 1125 *++tl = txdr_unsigned(uiop->uio_offset); 1126 tl += 2; 1127 } 1128 *tl = txdr_unsigned(len); 1129 nfsm_uiotom(uiop, len); 1130 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred); 1131 if (v3) { 1132 wccflag = NFSV3_WCCCHK; 1133 nfsm_wcc_data(vp, wccflag); 1134 if (!error) { 1135 nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED + 1136 NFSX_V3WRITEVERF); 1137 rlen = fxdr_unsigned(int, *tl++); 1138 if (rlen == 0) { 1139 error = NFSERR_IO; 1140 m_freem(mrep); 1141 break; 1142 } else if (rlen < len) { 1143 backup = len - rlen; 1144 uiop->uio_iov->iov_base -= backup; 1145 uiop->uio_iov->iov_len += backup; 1146 uiop->uio_offset -= backup; 1147 uiop->uio_resid += backup; 1148 len = rlen; 1149 } 1150 commit = fxdr_unsigned(int, *tl++); 1151 1152 /* 1153 * Return the lowest committment level 1154 * obtained by any of the RPCs. 
1155 */ 1156 if (committed == NFSV3WRITE_FILESYNC) 1157 committed = commit; 1158 else if (committed == NFSV3WRITE_DATASYNC && 1159 commit == NFSV3WRITE_UNSTABLE) 1160 committed = commit; 1161 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){ 1162 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 1163 NFSX_V3WRITEVERF); 1164 nmp->nm_state |= NFSSTA_HASWRITEVERF; 1165 } else if (bcmp((caddr_t)tl, 1166 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) { 1167 *must_commit = 1; 1168 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 1169 NFSX_V3WRITEVERF); 1170 } 1171 } 1172 } else 1173 nfsm_loadattr(vp, (struct vattr *)0); 1174 if (wccflag) 1175 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec; 1176 m_freem(mrep); 1177 tsiz -= len; 1178 } 1179nfsmout: 1180 if (vp->v_mount->mnt_flag & MNT_ASYNC) 1181 committed = NFSV3WRITE_FILESYNC; 1182 *iomode = committed; 1183 if (error) 1184 uiop->uio_resid = tsiz; 1185 return (error); 1186} 1187 1188/* 1189 * nfs mknod rpc 1190 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1191 * mode set to specify the file type and the size field for rdev. 
1192 */ 1193static int 1194nfs_mknodrpc(dvp, vpp, cnp, vap) 1195 register struct vnode *dvp; 1196 register struct vnode **vpp; 1197 register struct componentname *cnp; 1198 register struct vattr *vap; 1199{ 1200 register struct nfsv2_sattr *sp; 1201 register struct nfsv3_sattr *sp3; 1202 register u_long *tl; 1203 register caddr_t cp; 1204 register long t1, t2; 1205 struct vnode *newvp = (struct vnode *)0; 1206 struct nfsnode *np = (struct nfsnode *)0; 1207 struct vattr vattr; 1208 char *cp2; 1209 caddr_t bpos, dpos; 1210 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1211 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1212 u_long rdev; 1213 int v3 = NFS_ISV3(dvp); 1214 1215 if (vap->va_type == VCHR || vap->va_type == VBLK) 1216 rdev = txdr_unsigned(vap->va_rdev); 1217 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1218 rdev = 0xffffffff; 1219 else { 1220 VOP_ABORTOP(dvp, cnp); 1221 return (EOPNOTSUPP); 1222 } 1223 if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) { 1224 VOP_ABORTOP(dvp, cnp); 1225 return (error); 1226 } 1227 nfsstats.rpccnt[NFSPROC_MKNOD]++; 1228 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED + 1229 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3)); 1230 nfsm_fhtom(dvp, v3); 1231 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1232 if (v3) { 1233 nfsm_build(tl, u_long *, NFSX_UNSIGNED + NFSX_V3SRVSATTR); 1234 *tl++ = vtonfsv3_type(vap->va_type); 1235 sp3 = (struct nfsv3_sattr *)tl; 1236 nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid); 1237 if (vap->va_type == VCHR || vap->va_type == VBLK) { 1238 nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); 1239 *tl++ = txdr_unsigned(major(vap->va_rdev)); 1240 *tl = txdr_unsigned(minor(vap->va_rdev)); 1241 } 1242 } else { 1243 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 1244 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1245 sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid); 1246 sp->sa_gid = txdr_unsigned(vattr.va_gid); 1247 
sp->sa_size = rdev; 1248 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1249 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1250 } 1251 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred); 1252 if (!error) { 1253 nfsm_mtofh(dvp, newvp, v3, gotvp); 1254 if (!gotvp) { 1255 if (newvp) { 1256 vput(newvp); 1257 newvp = (struct vnode *)0; 1258 } 1259 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1260 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1261 if (!error) 1262 newvp = NFSTOV(np); 1263 } 1264 } 1265 if (v3) 1266 nfsm_wcc_data(dvp, wccflag); 1267 nfsm_reqdone; 1268 if (error) { 1269 if (newvp) 1270 vput(newvp); 1271 } else { 1272 if (cnp->cn_flags & MAKEENTRY) 1273 cache_enter(dvp, newvp, cnp); 1274 *vpp = newvp; 1275 } 1276 zfree(namei_zone, cnp->cn_pnbuf); 1277 VTONFS(dvp)->n_flag |= NMODIFIED; 1278 if (!wccflag) 1279 VTONFS(dvp)->n_attrstamp = 0; 1280 return (error); 1281} 1282 1283/* 1284 * nfs mknod vop 1285 * just call nfs_mknodrpc() to do the work. 1286 */ 1287/* ARGSUSED */ 1288static int 1289nfs_mknod(ap) 1290 struct vop_mknod_args /* { 1291 struct vnode *a_dvp; 1292 struct vnode **a_vpp; 1293 struct componentname *a_cnp; 1294 struct vattr *a_vap; 1295 } */ *ap; 1296{ 1297 struct vnode *newvp; 1298 int error; 1299 1300 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap); 1301 if (!error) 1302 vput(newvp); 1303 return (error); 1304} 1305 1306static u_long create_verf; 1307/* 1308 * nfs file create call 1309 */ 1310static int 1311nfs_create(ap) 1312 struct vop_create_args /* { 1313 struct vnode *a_dvp; 1314 struct vnode **a_vpp; 1315 struct componentname *a_cnp; 1316 struct vattr *a_vap; 1317 } */ *ap; 1318{ 1319 register struct vnode *dvp = ap->a_dvp; 1320 register struct vattr *vap = ap->a_vap; 1321 register struct componentname *cnp = ap->a_cnp; 1322 register struct nfsv2_sattr *sp; 1323 register struct nfsv3_sattr *sp3; 1324 register u_long *tl; 1325 register caddr_t cp; 1326 register long t1, t2; 1327 struct nfsnode *np = (struct 
nfsnode *)0; 1328 struct vnode *newvp = (struct vnode *)0; 1329 caddr_t bpos, dpos, cp2; 1330 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0; 1331 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1332 struct vattr vattr; 1333 int v3 = NFS_ISV3(dvp); 1334 1335 /* 1336 * Oops, not for me.. 1337 */ 1338 if (vap->va_type == VSOCK) 1339 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1340 1341 if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) { 1342 VOP_ABORTOP(dvp, cnp); 1343 return (error); 1344 } 1345 if (vap->va_vaflags & VA_EXCLUSIVE) 1346 fmode |= O_EXCL; 1347again: 1348 nfsstats.rpccnt[NFSPROC_CREATE]++; 1349 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED + 1350 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3)); 1351 nfsm_fhtom(dvp, v3); 1352 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1353 if (v3) { 1354 nfsm_build(tl, u_long *, NFSX_UNSIGNED); 1355 if (fmode & O_EXCL) { 1356 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE); 1357 nfsm_build(tl, u_long *, NFSX_V3CREATEVERF); 1358#ifdef INET 1359 if (!TAILQ_EMPTY(&in_ifaddrhead)) 1360 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr; 1361 else 1362#endif 1363 *tl++ = create_verf; 1364 *tl = ++create_verf; 1365 } else { 1366 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED); 1367 nfsm_build(tl, u_long *, NFSX_V3SRVSATTR); 1368 sp3 = (struct nfsv3_sattr *)tl; 1369 nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid); 1370 } 1371 } else { 1372 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 1373 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1374 sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid); 1375 sp->sa_gid = txdr_unsigned(vattr.va_gid); 1376 sp->sa_size = 0; 1377 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1378 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1379 } 1380 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred); 1381 if (!error) { 1382 nfsm_mtofh(dvp, newvp, v3, gotvp); 1383 if (!gotvp) { 1384 if (newvp) { 1385 
vput(newvp); 1386 newvp = (struct vnode *)0; 1387 } 1388 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1389 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1390 if (!error) 1391 newvp = NFSTOV(np); 1392 } 1393 } 1394 if (v3) 1395 nfsm_wcc_data(dvp, wccflag); 1396 nfsm_reqdone; 1397 if (error) { 1398 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) { 1399 fmode &= ~O_EXCL; 1400 goto again; 1401 } 1402 if (newvp) 1403 vput(newvp); 1404 } else if (v3 && (fmode & O_EXCL)) 1405 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc); 1406 if (!error) { 1407 if (cnp->cn_flags & MAKEENTRY) 1408 cache_enter(dvp, newvp, cnp); 1409 *ap->a_vpp = newvp; 1410 } 1411 zfree(namei_zone, cnp->cn_pnbuf); 1412 VTONFS(dvp)->n_flag |= NMODIFIED; 1413 if (!wccflag) 1414 VTONFS(dvp)->n_attrstamp = 0; 1415 return (error); 1416} 1417 1418/* 1419 * nfs file remove call 1420 * To try and make nfs semantics closer to ufs semantics, a file that has 1421 * other processes using the vnode is renamed instead of removed and then 1422 * removed later on the last close. 
1423 * - If v_usecount > 1 1424 * If a rename is not already in the works 1425 * call nfs_sillyrename() to set it up 1426 * else 1427 * do the remove rpc 1428 */ 1429static int 1430nfs_remove(ap) 1431 struct vop_remove_args /* { 1432 struct vnodeop_desc *a_desc; 1433 struct vnode * a_dvp; 1434 struct vnode * a_vp; 1435 struct componentname * a_cnp; 1436 } */ *ap; 1437{ 1438 register struct vnode *vp = ap->a_vp; 1439 register struct vnode *dvp = ap->a_dvp; 1440 register struct componentname *cnp = ap->a_cnp; 1441 register struct nfsnode *np = VTONFS(vp); 1442 int error = 0; 1443 struct vattr vattr; 1444 1445#ifndef DIAGNOSTIC 1446 if ((cnp->cn_flags & HASBUF) == 0) 1447 panic("nfs_remove: no name"); 1448 if (vp->v_usecount < 1) 1449 panic("nfs_remove: bad v_usecount"); 1450#endif 1451 if (vp->v_usecount == 1 || (np->n_sillyrename && 1452 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 && 1453 vattr.va_nlink > 1)) { 1454 /* 1455 * Purge the name cache so that the chance of a lookup for 1456 * the name succeeding while the remove is in progress is 1457 * minimized. Without node locking it can still happen, such 1458 * that an I/O op returns ESTALE, but since you get this if 1459 * another host removes the file.. 1460 */ 1461 cache_purge(vp); 1462 /* 1463 * throw away biocache buffers, mainly to avoid 1464 * unnecessary delayed writes later. 1465 */ 1466 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1); 1467 /* Do the rpc */ 1468 if (error != EINTR) 1469 error = nfs_removerpc(dvp, cnp->cn_nameptr, 1470 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc); 1471 /* 1472 * Kludge City: If the first reply to the remove rpc is lost.. 1473 * the reply to the retransmitted request will be ENOENT 1474 * since the file was in fact removed 1475 * Therefore, we cheat and return success. 
1476 */ 1477 if (error == ENOENT) 1478 error = 0; 1479 } else if (!np->n_sillyrename) 1480 error = nfs_sillyrename(dvp, vp, cnp); 1481 zfree(namei_zone, cnp->cn_pnbuf); 1482 np->n_attrstamp = 0; 1483 return (error); 1484} 1485 1486/* 1487 * nfs file remove rpc called from nfs_inactive 1488 */ 1489int 1490nfs_removeit(sp) 1491 register struct sillyrename *sp; 1492{ 1493 1494 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred, 1495 (struct proc *)0)); 1496} 1497 1498/* 1499 * Nfs remove rpc, called from nfs_remove() and nfs_removeit(). 1500 */ 1501static int 1502nfs_removerpc(dvp, name, namelen, cred, proc) 1503 register struct vnode *dvp; 1504 const char *name; 1505 int namelen; 1506 struct ucred *cred; 1507 struct proc *proc; 1508{ 1509 register u_long *tl; 1510 register caddr_t cp; 1511 register long t1, t2; 1512 caddr_t bpos, dpos, cp2; 1513 int error = 0, wccflag = NFSV3_WCCRATTR; 1514 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1515 int v3 = NFS_ISV3(dvp); 1516 1517 nfsstats.rpccnt[NFSPROC_REMOVE]++; 1518 nfsm_reqhead(dvp, NFSPROC_REMOVE, 1519 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen)); 1520 nfsm_fhtom(dvp, v3); 1521 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 1522 nfsm_request(dvp, NFSPROC_REMOVE, proc, cred); 1523 if (v3) 1524 nfsm_wcc_data(dvp, wccflag); 1525 nfsm_reqdone; 1526 VTONFS(dvp)->n_flag |= NMODIFIED; 1527 if (!wccflag) 1528 VTONFS(dvp)->n_attrstamp = 0; 1529 return (error); 1530} 1531 1532/* 1533 * nfs file rename call 1534 */ 1535static int 1536nfs_rename(ap) 1537 struct vop_rename_args /* { 1538 struct vnode *a_fdvp; 1539 struct vnode *a_fvp; 1540 struct componentname *a_fcnp; 1541 struct vnode *a_tdvp; 1542 struct vnode *a_tvp; 1543 struct componentname *a_tcnp; 1544 } */ *ap; 1545{ 1546 register struct vnode *fvp = ap->a_fvp; 1547 register struct vnode *tvp = ap->a_tvp; 1548 register struct vnode *fdvp = ap->a_fdvp; 1549 register struct vnode *tdvp = ap->a_tdvp; 1550 register struct componentname *tcnp = ap->a_tcnp; 
1551 register struct componentname *fcnp = ap->a_fcnp; 1552 int error; 1553 1554#ifndef DIAGNOSTIC 1555 if ((tcnp->cn_flags & HASBUF) == 0 || 1556 (fcnp->cn_flags & HASBUF) == 0) 1557 panic("nfs_rename: no name"); 1558#endif 1559 /* Check for cross-device rename */ 1560 if ((fvp->v_mount != tdvp->v_mount) || 1561 (tvp && (fvp->v_mount != tvp->v_mount))) { 1562 error = EXDEV; 1563 goto out; 1564 } 1565 1566 /* 1567 * If the tvp exists and is in use, sillyrename it before doing the 1568 * rename of the new file over it. 1569 * XXX Can't sillyrename a directory. 1570 */ 1571 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename && 1572 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { 1573 vput(tvp); 1574 tvp = NULL; 1575 } 1576 1577 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1578 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1579 tcnp->cn_proc); 1580 1581 if (fvp->v_type == VDIR) { 1582 if (tvp != NULL && tvp->v_type == VDIR) 1583 cache_purge(tdvp); 1584 cache_purge(fdvp); 1585 } 1586out: 1587 if (tdvp == tvp) 1588 vrele(tdvp); 1589 else 1590 vput(tdvp); 1591 if (tvp) 1592 vput(tvp); 1593 vrele(fdvp); 1594 vrele(fvp); 1595 /* 1596 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1597 */ 1598 if (error == ENOENT) 1599 error = 0; 1600 return (error); 1601} 1602 1603/* 1604 * nfs file rename rpc called from nfs_remove() above 1605 */ 1606static int 1607nfs_renameit(sdvp, scnp, sp) 1608 struct vnode *sdvp; 1609 struct componentname *scnp; 1610 register struct sillyrename *sp; 1611{ 1612 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, 1613 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc)); 1614} 1615 1616/* 1617 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 
1618 */ 1619static int 1620nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc) 1621 register struct vnode *fdvp; 1622 const char *fnameptr; 1623 int fnamelen; 1624 register struct vnode *tdvp; 1625 const char *tnameptr; 1626 int tnamelen; 1627 struct ucred *cred; 1628 struct proc *proc; 1629{ 1630 register u_long *tl; 1631 register caddr_t cp; 1632 register long t1, t2; 1633 caddr_t bpos, dpos, cp2; 1634 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 1635 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1636 int v3 = NFS_ISV3(fdvp); 1637 1638 nfsstats.rpccnt[NFSPROC_RENAME]++; 1639 nfsm_reqhead(fdvp, NFSPROC_RENAME, 1640 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) + 1641 nfsm_rndup(tnamelen)); 1642 nfsm_fhtom(fdvp, v3); 1643 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 1644 nfsm_fhtom(tdvp, v3); 1645 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 1646 nfsm_request(fdvp, NFSPROC_RENAME, proc, cred); 1647 if (v3) { 1648 nfsm_wcc_data(fdvp, fwccflag); 1649 nfsm_wcc_data(tdvp, twccflag); 1650 } 1651 nfsm_reqdone; 1652 VTONFS(fdvp)->n_flag |= NMODIFIED; 1653 VTONFS(tdvp)->n_flag |= NMODIFIED; 1654 if (!fwccflag) 1655 VTONFS(fdvp)->n_attrstamp = 0; 1656 if (!twccflag) 1657 VTONFS(tdvp)->n_attrstamp = 0; 1658 return (error); 1659} 1660 1661/* 1662 * nfs hard link create call 1663 */ 1664static int 1665nfs_link(ap) 1666 struct vop_link_args /* { 1667 struct vnode *a_tdvp; 1668 struct vnode *a_vp; 1669 struct componentname *a_cnp; 1670 } */ *ap; 1671{ 1672 register struct vnode *vp = ap->a_vp; 1673 register struct vnode *tdvp = ap->a_tdvp; 1674 register struct componentname *cnp = ap->a_cnp; 1675 register u_long *tl; 1676 register caddr_t cp; 1677 register long t1, t2; 1678 caddr_t bpos, dpos, cp2; 1679 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 1680 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1681 int v3 = NFS_ISV3(vp); 1682 1683 if (vp->v_mount != tdvp->v_mount) { 1684 VOP_ABORTOP(tdvp, cnp); 1685 return 
(EXDEV); 1686 } 1687 1688 /* 1689 * Push all writes to the server, so that the attribute cache 1690 * doesn't get "out of sync" with the server. 1691 * XXX There should be a better way! 1692 */ 1693 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc); 1694 1695 nfsstats.rpccnt[NFSPROC_LINK]++; 1696 nfsm_reqhead(vp, NFSPROC_LINK, 1697 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1698 nfsm_fhtom(vp, v3); 1699 nfsm_fhtom(tdvp, v3); 1700 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1701 nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred); 1702 if (v3) { 1703 nfsm_postop_attr(vp, attrflag); 1704 nfsm_wcc_data(tdvp, wccflag); 1705 } 1706 nfsm_reqdone; 1707 zfree(namei_zone, cnp->cn_pnbuf); 1708 VTONFS(tdvp)->n_flag |= NMODIFIED; 1709 if (!attrflag) 1710 VTONFS(vp)->n_attrstamp = 0; 1711 if (!wccflag) 1712 VTONFS(tdvp)->n_attrstamp = 0; 1713 /* 1714 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 1715 */ 1716 if (error == EEXIST) 1717 error = 0; 1718 return (error); 1719} 1720 1721/* 1722 * nfs symbolic link create call 1723 */ 1724static int 1725nfs_symlink(ap) 1726 struct vop_symlink_args /* { 1727 struct vnode *a_dvp; 1728 struct vnode **a_vpp; 1729 struct componentname *a_cnp; 1730 struct vattr *a_vap; 1731 char *a_target; 1732 } */ *ap; 1733{ 1734 register struct vnode *dvp = ap->a_dvp; 1735 register struct vattr *vap = ap->a_vap; 1736 register struct componentname *cnp = ap->a_cnp; 1737 register struct nfsv2_sattr *sp; 1738 register struct nfsv3_sattr *sp3; 1739 register u_long *tl; 1740 register caddr_t cp; 1741 register long t1, t2; 1742 caddr_t bpos, dpos, cp2; 1743 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 1744 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1745 struct vnode *newvp = (struct vnode *)0; 1746 int v3 = NFS_ISV3(dvp); 1747 1748 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 1749 slen = strlen(ap->a_target); 1750 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED + 1751 
nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3)); 1752 nfsm_fhtom(dvp, v3); 1753 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1754 if (v3) { 1755 nfsm_build(sp3, struct nfsv3_sattr *, NFSX_V3SRVSATTR); 1756 nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, 1757 cnp->cn_cred->cr_gid); 1758 } 1759 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 1760 if (!v3) { 1761 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 1762 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 1763 sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid); 1764 sp->sa_gid = txdr_unsigned(cnp->cn_cred->cr_gid); 1765 sp->sa_size = -1; 1766 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1767 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1768 } 1769 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred); 1770 if (v3) { 1771 if (!error) 1772 nfsm_mtofh(dvp, newvp, v3, gotvp); 1773 nfsm_wcc_data(dvp, wccflag); 1774 } 1775 nfsm_reqdone; 1776 if (newvp) 1777 vput(newvp); 1778 zfree(namei_zone, cnp->cn_pnbuf); 1779 VTONFS(dvp)->n_flag |= NMODIFIED; 1780 if (!wccflag) 1781 VTONFS(dvp)->n_attrstamp = 0; 1782 /* 1783 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 
1784 */ 1785 if (error == EEXIST) 1786 error = 0; 1787 return (error); 1788} 1789 1790/* 1791 * nfs make dir call 1792 */ 1793static int 1794nfs_mkdir(ap) 1795 struct vop_mkdir_args /* { 1796 struct vnode *a_dvp; 1797 struct vnode **a_vpp; 1798 struct componentname *a_cnp; 1799 struct vattr *a_vap; 1800 } */ *ap; 1801{ 1802 register struct vnode *dvp = ap->a_dvp; 1803 register struct vattr *vap = ap->a_vap; 1804 register struct componentname *cnp = ap->a_cnp; 1805 register struct nfsv2_sattr *sp; 1806 register struct nfsv3_sattr *sp3; 1807 register u_long *tl; 1808 register caddr_t cp; 1809 register long t1, t2; 1810 register int len; 1811 struct nfsnode *np = (struct nfsnode *)0; 1812 struct vnode *newvp = (struct vnode *)0; 1813 caddr_t bpos, dpos, cp2; 1814 int error = 0, wccflag = NFSV3_WCCRATTR; 1815 int gotvp = 0; 1816 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1817 struct vattr vattr; 1818 int v3 = NFS_ISV3(dvp); 1819 1820 if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) { 1821 VOP_ABORTOP(dvp, cnp); 1822 return (error); 1823 } 1824 len = cnp->cn_namelen; 1825 nfsstats.rpccnt[NFSPROC_MKDIR]++; 1826 nfsm_reqhead(dvp, NFSPROC_MKDIR, 1827 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3)); 1828 nfsm_fhtom(dvp, v3); 1829 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 1830 if (v3) { 1831 nfsm_build(sp3, struct nfsv3_sattr *, NFSX_V3SRVSATTR); 1832 nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid); 1833 } else { 1834 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 1835 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 1836 sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid); 1837 sp->sa_gid = txdr_unsigned(vattr.va_gid); 1838 sp->sa_size = -1; 1839 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1840 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1841 } 1842 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred); 1843 if (!error) 1844 nfsm_mtofh(dvp, newvp, v3, gotvp); 1845 if (v3) 1846 nfsm_wcc_data(dvp, wccflag); 
1847 nfsm_reqdone; 1848 VTONFS(dvp)->n_flag |= NMODIFIED; 1849 if (!wccflag) 1850 VTONFS(dvp)->n_attrstamp = 0; 1851 /* 1852 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry 1853 * if we can succeed in looking up the directory. 1854 */ 1855 if (error == EEXIST || (!error && !gotvp)) { 1856 if (newvp) { 1857 vrele(newvp); 1858 newvp = (struct vnode *)0; 1859 } 1860 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 1861 cnp->cn_proc, &np); 1862 if (!error) { 1863 newvp = NFSTOV(np); 1864 if (newvp->v_type != VDIR) 1865 error = EEXIST; 1866 } 1867 } 1868 if (error) { 1869 if (newvp) 1870 vrele(newvp); 1871 } else 1872 *ap->a_vpp = newvp; 1873 zfree(namei_zone, cnp->cn_pnbuf); 1874 return (error); 1875} 1876 1877/* 1878 * nfs remove directory call 1879 */ 1880static int 1881nfs_rmdir(ap) 1882 struct vop_rmdir_args /* { 1883 struct vnode *a_dvp; 1884 struct vnode *a_vp; 1885 struct componentname *a_cnp; 1886 } */ *ap; 1887{ 1888 register struct vnode *vp = ap->a_vp; 1889 register struct vnode *dvp = ap->a_dvp; 1890 register struct componentname *cnp = ap->a_cnp; 1891 register u_long *tl; 1892 register caddr_t cp; 1893 register long t1, t2; 1894 caddr_t bpos, dpos, cp2; 1895 int error = 0, wccflag = NFSV3_WCCRATTR; 1896 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1897 int v3 = NFS_ISV3(dvp); 1898 1899 nfsstats.rpccnt[NFSPROC_RMDIR]++; 1900 nfsm_reqhead(dvp, NFSPROC_RMDIR, 1901 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1902 nfsm_fhtom(dvp, v3); 1903 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1904 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred); 1905 if (v3) 1906 nfsm_wcc_data(dvp, wccflag); 1907 nfsm_reqdone; 1908 zfree(namei_zone, cnp->cn_pnbuf); 1909 VTONFS(dvp)->n_flag |= NMODIFIED; 1910 if (!wccflag) 1911 VTONFS(dvp)->n_attrstamp = 0; 1912 cache_purge(dvp); 1913 cache_purge(vp); 1914 /* 1915 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 
1916 */ 1917 if (error == ENOENT) 1918 error = 0; 1919 return (error); 1920} 1921 1922/* 1923 * nfs readdir call 1924 */ 1925static int 1926nfs_readdir(ap) 1927 struct vop_readdir_args /* { 1928 struct vnode *a_vp; 1929 struct uio *a_uio; 1930 struct ucred *a_cred; 1931 } */ *ap; 1932{ 1933 register struct vnode *vp = ap->a_vp; 1934 register struct nfsnode *np = VTONFS(vp); 1935 register struct uio *uio = ap->a_uio; 1936 int tresid, error; 1937 struct vattr vattr; 1938 1939 if (vp->v_type != VDIR) 1940 return (EPERM); 1941 /* 1942 * First, check for hit on the EOF offset cache 1943 */ 1944 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset && 1945 (np->n_flag & NMODIFIED) == 0) { 1946 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) { 1947 if (NQNFS_CKCACHABLE(vp, ND_READ)) { 1948 nfsstats.direofcache_hits++; 1949 return (0); 1950 } 1951 } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 && 1952 np->n_mtime == vattr.va_mtime.tv_sec) { 1953 nfsstats.direofcache_hits++; 1954 return (0); 1955 } 1956 } 1957 1958 /* 1959 * Call nfs_bioread() to do the real work. 1960 */ 1961 tresid = uio->uio_resid; 1962 error = nfs_bioread(vp, uio, 0, ap->a_cred, 0); 1963 1964 if (!error && uio->uio_resid == tresid) 1965 nfsstats.direofcache_misses++; 1966 return (error); 1967} 1968 1969/* 1970 * Readdir rpc call. 1971 * Called from below the buffer cache by nfs_doio(). 
1972 */ 1973int 1974nfs_readdirrpc(vp, uiop, cred) 1975 struct vnode *vp; 1976 register struct uio *uiop; 1977 struct ucred *cred; 1978 1979{ 1980 register int len, left; 1981 register struct dirent *dp; 1982 register u_long *tl; 1983 register caddr_t cp; 1984 register long t1, t2; 1985 register nfsuint64 *cookiep; 1986 caddr_t bpos, dpos, cp2; 1987 struct mbuf *mreq, *mrep, *md, *mb, *mb2; 1988 nfsuint64 cookie; 1989 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1990 struct nfsnode *dnp = VTONFS(vp); 1991 u_quad_t fileno; 1992 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1; 1993 int attrflag; 1994 int v3 = NFS_ISV3(vp); 1995 1996#ifndef nolint 1997 dp = (struct dirent *)0; 1998#endif 1999#ifndef DIAGNOSTIC 2000 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (NFS_DIRBLKSIZ - 1)) || 2001 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2002 panic("nfs readdirrpc bad uio"); 2003#endif 2004 2005 /* 2006 * If there is no cookie, assume directory was stale. 2007 */ 2008 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0); 2009 if (cookiep) 2010 cookie = *cookiep; 2011 else 2012 return (NFSERR_BAD_COOKIE); 2013 /* 2014 * Loop around doing readdir rpc's of size nm_readdirsize 2015 * truncated to a multiple of DIRBLKSIZ. 2016 * The stopping criteria is EOF or buffer full. 
2017 */ 2018 while (more_dirs && bigenough) { 2019 nfsstats.rpccnt[NFSPROC_READDIR]++; 2020 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) + 2021 NFSX_READDIR(v3)); 2022 nfsm_fhtom(vp, v3); 2023 if (v3) { 2024 nfsm_build(tl, u_long *, 5 * NFSX_UNSIGNED); 2025 *tl++ = cookie.nfsuquad[0]; 2026 *tl++ = cookie.nfsuquad[1]; 2027 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2028 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2029 } else { 2030 nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); 2031 *tl++ = cookie.nfsuquad[0]; 2032 } 2033 *tl = txdr_unsigned(nmp->nm_readdirsize); 2034 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred); 2035 if (v3) { 2036 nfsm_postop_attr(vp, attrflag); 2037 if (!error) { 2038 nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); 2039 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2040 dnp->n_cookieverf.nfsuquad[1] = *tl; 2041 } else { 2042 m_freem(mrep); 2043 goto nfsmout; 2044 } 2045 } 2046 nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); 2047 more_dirs = fxdr_unsigned(int, *tl); 2048 2049 /* loop thru the dir entries, doctoring them to 4bsd form */ 2050 while (more_dirs && bigenough) { 2051 if (v3) { 2052 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); 2053 fxdr_hyper(tl, &fileno); 2054 len = fxdr_unsigned(int, *(tl + 2)); 2055 } else { 2056 nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); 2057 fileno = fxdr_unsigned(u_quad_t, *tl++); 2058 len = fxdr_unsigned(int, *tl); 2059 } 2060 if (len <= 0 || len > NFS_MAXNAMLEN) { 2061 error = EBADRPC; 2062 m_freem(mrep); 2063 goto nfsmout; 2064 } 2065 tlen = nfsm_rndup(len); 2066 if (tlen == len) 2067 tlen += 4; /* To ensure null termination */ 2068 left = DIRBLKSIZ - blksiz; 2069 if ((tlen + DIRHDSIZ) > left) { 2070 dp->d_reclen += left; 2071 uiop->uio_iov->iov_base += left; 2072 uiop->uio_iov->iov_len -= left; 2073 uiop->uio_offset += left; 2074 uiop->uio_resid -= left; 2075 blksiz = 0; 2076 } 2077 if ((tlen + DIRHDSIZ) > uiop->uio_resid) 2078 bigenough = 0; 2079 if (bigenough) { 2080 dp = (struct dirent *)uiop->uio_iov->iov_base; 
2081 dp->d_fileno = (int)fileno; 2082 dp->d_namlen = len; 2083 dp->d_reclen = tlen + DIRHDSIZ; 2084 dp->d_type = DT_UNKNOWN; 2085 blksiz += dp->d_reclen; 2086 if (blksiz == DIRBLKSIZ) 2087 blksiz = 0; 2088 uiop->uio_offset += DIRHDSIZ; 2089 uiop->uio_resid -= DIRHDSIZ; 2090 uiop->uio_iov->iov_base += DIRHDSIZ; 2091 uiop->uio_iov->iov_len -= DIRHDSIZ; 2092 nfsm_mtouio(uiop, len); 2093 cp = uiop->uio_iov->iov_base; 2094 tlen -= len; 2095 *cp = '\0'; /* null terminate */ 2096 uiop->uio_iov->iov_base += tlen; 2097 uiop->uio_iov->iov_len -= tlen; 2098 uiop->uio_offset += tlen; 2099 uiop->uio_resid -= tlen; 2100 } else 2101 nfsm_adv(nfsm_rndup(len)); 2102 if (v3) { 2103 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); 2104 } else { 2105 nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); 2106 } 2107 if (bigenough) { 2108 cookie.nfsuquad[0] = *tl++; 2109 if (v3) 2110 cookie.nfsuquad[1] = *tl++; 2111 } else if (v3) 2112 tl += 2; 2113 else 2114 tl++; 2115 more_dirs = fxdr_unsigned(int, *tl); 2116 } 2117 /* 2118 * If at end of rpc data, get the eof boolean 2119 */ 2120 if (!more_dirs) { 2121 nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); 2122 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2123 } 2124 m_freem(mrep); 2125 } 2126 /* 2127 * Fill last record, iff any, out to a multiple of DIRBLKSIZ 2128 * by increasing d_reclen for the last record. 2129 */ 2130 if (blksiz > 0) { 2131 left = DIRBLKSIZ - blksiz; 2132 dp->d_reclen += left; 2133 uiop->uio_iov->iov_base += left; 2134 uiop->uio_iov->iov_len -= left; 2135 uiop->uio_offset += left; 2136 uiop->uio_resid -= left; 2137 } 2138 2139 /* 2140 * We are now either at the end of the directory or have filled the 2141 * block. 2142 */ 2143 if (bigenough) 2144 dnp->n_direofoffset = uiop->uio_offset; 2145 else { 2146 if (uiop->uio_resid > 0) 2147 printf("EEK! 
readdirrpc resid > 0\n");
		cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
		*cookiep = cookie;
	}
nfsmout:
	return (error);
}

/*
 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
 *
 * Like nfs_readdirrpc(), this fills uiop with 4.4BSD "struct dirent"
 * records in DIRBLKSIZ-aligned chunks, but it additionally decodes the
 * post-op attributes and file handle returned for each entry so the
 * resulting vnodes can be primed into the name cache via cache_enter().
 * Caller must supply a uio with a single iovec, DIRBLKSIZ-aligned offset
 * and resid (checked by the panic below).
 */
int
nfs_readdirplusrpc(vp, uiop, cred)
	struct vnode *vp;
	register struct uio *uiop;
	struct ucred *cred;
{
	register int len, left;
	register struct dirent *dp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	register struct vnode *newvp;
	register nfsuint64 *cookiep;
	caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
	struct nameidata nami, *ndp = &nami;
	struct componentname *cnp = &ndp->ni_cnd;
	nfsuint64 cookie;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *dnp = VTONFS(vp), *np;
	nfsfh_t *fhp;
	u_quad_t fileno;
	int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
	int attrflag, fhsize;

#ifndef nolint
	dp = (struct dirent *)0;
#endif
	/*
	 * NOTE(review): this sanity panic is compiled in only when
	 * DIAGNOSTIC is NOT defined, which looks inverted — confirm
	 * against the intended convention before changing it.
	 */
#ifndef DIAGNOSTIC
	if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
		(uiop->uio_resid & (DIRBLKSIZ - 1)))
		panic("nfs readdirplusrpc bad uio");
#endif
	ndp->ni_dvp = vp;
	newvp = NULLVP;

	/*
	 * If there is no cookie, assume directory was stale.
	 */
	cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep)
		cookie = *cookiep;
	else
		return (NFSERR_BAD_COOKIE);
	/*
	 * Loop around doing readdir rpc's of size nm_readdirsize
	 * truncated to a multiple of DIRBLKSIZ.
	 * The stopping criteria is EOF or buffer full.
	 */
	while (more_dirs && bigenough) {
		nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
		nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
			NFSX_FH(1) + 6 * NFSX_UNSIGNED);
		nfsm_fhtom(vp, 1);
		/* cookie + cookieverf + dircount + maxcount (all v3-only) */
		nfsm_build(tl, u_long *, 6 * NFSX_UNSIGNED);
		*tl++ = cookie.nfsuquad[0];
		*tl++ = cookie.nfsuquad[1];
		*tl++ = dnp->n_cookieverf.nfsuquad[0];
		*tl++ = dnp->n_cookieverf.nfsuquad[1];
		*tl++ = txdr_unsigned(nmp->nm_readdirsize);
		*tl = txdr_unsigned(nmp->nm_rsize);
		nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
		nfsm_postop_attr(vp, attrflag);
		if (error) {
			m_freem(mrep);
			goto nfsmout;
		}
		/* Remember the server's cookie verifier for the next call. */
		nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
		dnp->n_cookieverf.nfsuquad[0] = *tl++;
		dnp->n_cookieverf.nfsuquad[1] = *tl++;
		more_dirs = fxdr_unsigned(int, *tl);

		/* loop thru the dir entries, doctoring them to 4bsd form */
		while (more_dirs && bigenough) {
			nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
			fxdr_hyper(tl, &fileno);
			len = fxdr_unsigned(int, *(tl + 2));
			if (len <= 0 || len > NFS_MAXNAMLEN) {
				error = EBADRPC;
				m_freem(mrep);
				goto nfsmout;
			}
			tlen = nfsm_rndup(len);
			if (tlen == len)
				tlen += 4;	/* To ensure null termination*/
			/*
			 * If the entry would straddle a DIRBLKSIZ boundary,
			 * pad the previous record out to the boundary first.
			 */
			left = DIRBLKSIZ - blksiz;
			if ((tlen + DIRHDSIZ) > left) {
				dp->d_reclen += left;
				uiop->uio_iov->iov_base += left;
				uiop->uio_iov->iov_len -= left;
				uiop->uio_offset += left;
				uiop->uio_resid -= left;
				blksiz = 0;
			}
			if ((tlen + DIRHDSIZ) > uiop->uio_resid)
				bigenough = 0;
			if (bigenough) {
				dp = (struct dirent *)uiop->uio_iov->iov_base;
				/* 64-bit NFS fileid truncated to d_fileno */
				dp->d_fileno = (int)fileno;
				dp->d_namlen = len;
				dp->d_reclen = tlen + DIRHDSIZ;
				dp->d_type = DT_UNKNOWN;
				blksiz += dp->d_reclen;
				if (blksiz == DIRBLKSIZ)
					blksiz = 0;
				uiop->uio_offset += DIRHDSIZ;
				uiop->uio_resid -= DIRHDSIZ;
				uiop->uio_iov->iov_base += DIRHDSIZ;
				uiop->uio_iov->iov_len -= DIRHDSIZ;
				/* Name lands in the caller's buffer; point
				 * the componentname at it for cache_enter. */
				cnp->cn_nameptr = uiop->uio_iov->iov_base;
				cnp->cn_namelen = len;
				nfsm_mtouio(uiop, len);
				cp = uiop->uio_iov->iov_base;
				tlen -= len;
				*cp = '\0';
				uiop->uio_iov->iov_base += tlen;
				uiop->uio_iov->iov_len -= tlen;
				uiop->uio_offset += tlen;
				uiop->uio_resid -= tlen;
			} else
				nfsm_adv(nfsm_rndup(len));
			nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
			if (bigenough) {
				cookie.nfsuquad[0] = *tl++;
				cookie.nfsuquad[1] = *tl++;
			} else
				tl += 2;

			/*
			 * Since the attributes are before the file handle
			 * (sigh), we must skip over the attributes and then
			 * come back and get them.
			 */
			attrflag = fxdr_unsigned(int, *tl);
			if (attrflag) {
				dpossav1 = dpos;
				mdsav1 = md;
				nfsm_adv(NFSX_V3FATTR);
				nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
				doit = fxdr_unsigned(int, *tl);
				if (doit) {
					nfsm_getfh(fhp, fhsize, 1);
					if (NFS_CMPFH(dnp, fhp, fhsize)) {
						/* "." — reuse the directory */
						VREF(vp);
						newvp = vp;
						np = dnp;
					} else {
						error = nfs_nget(vp->v_mount, fhp,
						    fhsize, &np);
						if (error)
							doit = 0;
						else
							newvp = NFSTOV(np);
					}
				}
				if (doit) {
					/* Rewind to the saved attribute
					 * position, load them, then restore. */
					dpossav2 = dpos;
					dpos = dpossav1;
					mdsav2 = md;
					md = mdsav1;
					nfsm_loadattr(newvp, (struct vattr *)0);
					dpos = dpossav2;
					md = mdsav2;
					dp->d_type =
					    IFTODT(VTTOIF(np->n_vattr.va_type));
					ndp->ni_vp = newvp;
					cnp->cn_hash = 0;
					for (cp = cnp->cn_nameptr, i = 1; i <= len;
					    i++, cp++)
						cnp->cn_hash += (unsigned char)*cp * i;
					cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
				}
			} else {
				/* Just skip over the file handle */
				nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
				i = fxdr_unsigned(int, *tl);
				nfsm_adv(nfsm_rndup(i));
			}
			/* Drop the per-entry vnode reference each iteration. */
			if (newvp != NULLVP) {
				vrele(newvp);
				newvp = NULLVP;
			}
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			more_dirs = fxdr_unsigned(int, *tl);
		}
		/*
		 * If at end of rpc data, get the eof boolean
		 */
		if (!more_dirs) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			more_dirs = (fxdr_unsigned(int, *tl) == 0);
		}
		m_freem(mrep);
	}
	/*
	 * Fill last record, iff any, out to a multiple of NFS_DIRBLKSIZ
	 * by increasing d_reclen for the last record.
	 */
	if (blksiz > 0) {
		left = DIRBLKSIZ - blksiz;
		dp->d_reclen += left;
		uiop->uio_iov->iov_base += left;
		uiop->uio_iov->iov_len -= left;
		uiop->uio_offset += left;
		uiop->uio_resid -= left;
	}

	/*
	 * We are now either at the end of the directory or have filled the
	 * block.
	 */
	if (bigenough)
		dnp->n_direofoffset = uiop->uio_offset;
	else {
		if (uiop->uio_resid > 0)
			printf("EEK! readdirplusrpc resid > 0\n");
		/* Stash the resume cookie keyed by the new offset. */
		cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
		*cookiep = cookie;
	}
nfsmout:
	/* Error paths can leave a referenced entry vnode behind. */
	if (newvp != NULLVP) {
		if (newvp == vp)
			vrele(newvp);
		else
			vput(newvp);
		newvp = NULLVP;
	}
	return (error);
}

/*
 * Silly rename. To make the NFS filesystem that is stateless look a little
 * more like the "ufs" a remove of an active vnode is translated to a rename
 * to a funny looking filename that is removed by nfs_inactive on the
 * nfsnode. There is the potential for another process on a different client
 * to create the same funny name between the nfs_lookitup() fails and the
 * nfs_rename() completes, but...
 */
static int
nfs_sillyrename(dvp, vp, cnp)
	struct vnode *dvp, *vp;
	struct componentname *cnp;
{
	register struct sillyrename *sp;
	struct nfsnode *np;
	int error;
	short pid;

	cache_purge(dvp);
	np = VTONFS(vp);
	/*
	 * NOTE(review): compiled only when DIAGNOSTIC is NOT defined —
	 * polarity looks inverted; confirm before changing.
	 */
#ifndef DIAGNOSTIC
	if (vp->v_type == VDIR)
		panic("nfs: sillyrename dir");
#endif
	MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
		M_NFSREQ, M_WAITOK);
	sp->s_cred = crdup(cnp->cn_cred);
	sp->s_dvp = dvp;
	VREF(dvp);

	/* Fudge together a funny name */
	pid = cnp->cn_proc->p_pid;
	sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);

	/*
	 * Try lookitups until we get one that isn't there.
	 * s_name[4] is the 'A' in ".nfsA..."; bump it until the name
	 * is free, bailing out past 'z'.
	 */
	while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
		cnp->cn_proc, (struct nfsnode **)0) == 0) {
		sp->s_name[4]++;
		if (sp->s_name[4] > 'z') {
			error = EINVAL;
			goto bad;
		}
	}
	error = nfs_renameit(dvp, cnp, sp);
	if (error)
		goto bad;
	/*
	 * NOTE(review): the return value of this lookitup is assigned but
	 * not checked before np->n_sillyrename is set; on failure np still
	 * points at VTONFS(vp) from above — verify this is intentional.
	 */
	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
		cnp->cn_proc, &np);
	np->n_sillyrename = sp;
	return (0);
bad:
	vrele(sp->s_dvp);
	crfree(sp->s_cred);
	free((caddr_t)sp, M_NFSREQ);
	return (error);
}

/*
 * Look up a file name and optionally either update the file handle or
 * allocate an nfsnode, depending on the value of npp.
 * npp == NULL	--> just do the lookup
 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 *	handled too
 * *npp != NULL --> update the file handle in the vnode
 */
static int
nfs_lookitup(dvp, name, len, cred, procp, npp)
	register struct vnode *dvp;
	const char *name;
	int len;
	struct ucred *cred;
	struct proc *procp;
	struct nfsnode **npp;
{
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	struct vnode *newvp = (struct vnode *)0;
	struct nfsnode *np, *dnp = VTONFS(dvp);
	caddr_t bpos, dpos, cp2;
	int error = 0, fhlen, attrflag;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	nfsfh_t *nfhp;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
	nfsm_reqhead(dvp, NFSPROC_LOOKUP,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(name, len, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred);
	if (npp && !error) {
		nfsm_getfh(nfhp, fhlen, v3);
		if (*npp) {
			/*
			 * Update the existing nfsnode's file handle,
			 * switching between the inline small-fh buffer and
			 * a malloc'd big-fh buffer as the size dictates.
			 */
			np = *npp;
			if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
				free((caddr_t)np->n_fhp, M_NFSBIGFH);
				np->n_fhp = &np->n_fh;
			} else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
				np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
			bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
			np->n_fhsize = fhlen;
			newvp = NFSTOV(np);
		} else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
			/* Lookup came back with the directory itself. */
			VREF(dvp);
			newvp = dvp;
		} else {
			error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
			if (error) {
				m_freem(mrep);
				return (error);
			}
			newvp = NFSTOV(np);
		}
		if (v3) {
			nfsm_postop_attr(newvp, attrflag);
			if (!attrflag && *npp == NULL) {
				/* No attrs on a freshly allocated node:
				 * we can't return it uninitialized. */
				m_freem(mrep);
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
				return (ENOENT);
			}
		} else
			nfsm_loadattr(newvp, (struct vattr *)0);
	}
	nfsm_reqdone;
	if (npp && *npp == NULL) {
		if (error) {
			/* Drop the reference acquired above on failure. */
			if (newvp)
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
		} else
			*npp = np;
	}
	return (error);
}

/*
 * Nfs Version 3 commit rpc
 *
 * Asks the server to flush [offset, offset+cnt) to stable storage and
 * compares the returned write verifier with the cached one; a mismatch
 * means the server rebooted, so NFSERR_STALEWRITEVERF is returned and
 * the caller must rewrite uncommitted buffers.
 */
static int
nfs_commit(vp, offset, cnt, cred, procp)
	register struct vnode *vp;
	u_quad_t offset;
	int cnt;
	struct ucred *cred;
	struct proc *procp;
{
	register caddr_t cp;
	register u_long *tl;
	register int t1, t2;
	register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;

	/* No verifier cached yet: nothing written, nothing to commit. */
	if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
		return (0);
	nfsstats.rpccnt[NFSPROC_COMMIT]++;
	nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
	nfsm_fhtom(vp, 1);
	nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED);
	txdr_hyper(&offset, tl);
	tl += 2;
	*tl = txdr_unsigned(cnt);
	nfsm_request(vp, NFSPROC_COMMIT, procp, cred);
	nfsm_wcc_data(vp, wccflag);
	if (!error) {
		nfsm_dissect(tl, u_long *, NFSX_V3WRITEVERF);
		if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
			NFSX_V3WRITEVERF)) {
			bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
				NFSX_V3WRITEVERF);
			error = NFSERR_STALEWRITEVERF;
		}
	}
	nfsm_reqdone;
	return (error);
}

/*
 * Kludge City..
 * - make nfs_bmap() essentially a no-op that does no translation
 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
 * (Maybe I could use the process's page mapping, but I was concerned that
 *  Kernel Write might not be enabled and also figured copyout() would do
 *  a lot more work than bcopy() and also it currently happens in the
 *  context of the swapper process (2).
2580 */ 2581static int 2582nfs_bmap(ap) 2583 struct vop_bmap_args /* { 2584 struct vnode *a_vp; 2585 daddr_t a_bn; 2586 struct vnode **a_vpp; 2587 daddr_t *a_bnp; 2588 int *a_runp; 2589 int *a_runb; 2590 } */ *ap; 2591{ 2592 register struct vnode *vp = ap->a_vp; 2593 2594 if (ap->a_vpp != NULL) 2595 *ap->a_vpp = vp; 2596 if (ap->a_bnp != NULL) 2597 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize); 2598 if (ap->a_runp != NULL) 2599 *ap->a_runp = 0; 2600 if (ap->a_runb != NULL) 2601 *ap->a_runb = 0; 2602 return (0); 2603} 2604 2605/* 2606 * Strategy routine. 2607 * For async requests when nfsiod(s) are running, queue the request by 2608 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the 2609 * request. 2610 */ 2611static int 2612nfs_strategy(ap) 2613 struct vop_strategy_args *ap; 2614{ 2615 register struct buf *bp = ap->a_bp; 2616 struct ucred *cr; 2617 struct proc *p; 2618 int error = 0; 2619 2620 if (bp->b_flags & B_PHYS) 2621 panic("nfs physio"); 2622 if (bp->b_flags & B_ASYNC) 2623 p = (struct proc *)0; 2624 else 2625 p = curproc; /* XXX */ 2626 if (bp->b_flags & B_READ) 2627 cr = bp->b_rcred; 2628 else 2629 cr = bp->b_wcred; 2630 /* 2631 * If the op is asynchronous and an i/o daemon is waiting 2632 * queue the request, wake it up and wait for completion 2633 * otherwise just do it ourselves. 2634 */ 2635 if ((bp->b_flags & B_ASYNC) == 0 || 2636 nfs_asyncio(bp, NOCRED)) 2637 error = nfs_doio(bp, cr, p); 2638 return (error); 2639} 2640 2641/* 2642 * Mmap a file 2643 * 2644 * NB Currently unsupported. 2645 */ 2646/* ARGSUSED */ 2647static int 2648nfs_mmap(ap) 2649 struct vop_mmap_args /* { 2650 struct vnode *a_vp; 2651 int a_fflags; 2652 struct ucred *a_cred; 2653 struct proc *a_p; 2654 } */ *ap; 2655{ 2656 2657 return (EINVAL); 2658} 2659 2660/* 2661 * fsync vnode op. Just call nfs_flush() with commit == 1. 
 */
/* ARGSUSED */
static int
nfs_fsync(ap)
	struct vop_fsync_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_vp;
		struct ucred * a_cred;
		int  a_waitfor;
		struct proc * a_p;
	} */ *ap;
{

	return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
}

/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 *
 * commit != 0 enables the two-pass NFSv3 scheme: pass one gathers
 * B_DELWRI|B_NEEDCOMMIT buffers and issues commit rpc's for them;
 * pass two writes out whatever remains dirty.  Returns 0 or an errno
 * (EINTR if an interruptible mount's sleep was signalled, or a
 * previously recorded async write error from the nfsnode).
 */
static int
nfs_flush(vp, cred, waitfor, p, commit)
	register struct vnode *vp;
	struct ucred *cred;
	int waitfor;
	struct proc *p;
	int commit;
{
	register struct nfsnode *np = VTONFS(vp);
	register struct buf *bp;
	register int i;
	struct buf *nbp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
	int passone = 1;
	u_quad_t off, endoff, toff;
	struct ucred* wcred = NULL;
	struct buf **bvec = NULL;
#ifndef NFS_COMMITBVECSIZ
#define NFS_COMMITBVECSIZ	20
#endif
	struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
	int bvecsize = 0, bveccount;

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	if (!commit)
		passone = 0;
	/*
	 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
	 * server, but has not been committed to stable storage on the server
	 * yet. On the first pass, the byte range is worked out and the commit
	 * rpc is done. On the second pass, nfs_writebp() is called to do the
	 * job.
	 */
again:
	off = (u_quad_t)-1;
	endoff = 0;
	bvecpos = 0;
	if (NFS_ISV3(vp) && commit) {
		s = splbio();
		/*
		 * Count up how many buffers waiting for a commit.
		 */
		bveccount = 0;
		for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
				== (B_DELWRI | B_NEEDCOMMIT))
				bveccount++;
		}
		/*
		 * Allocate space to remember the list of bufs to commit. It is
		 * important to use M_NOWAIT here to avoid a race with nfs_write.
		 * If we can't get memory (for whatever reason), we will end up
		 * committing the buffers one-by-one in the loop below.
		 */
		if (bveccount > NFS_COMMITBVECSIZ) {
			if (bvec != NULL && bvec != bvec_on_stack)
				free(bvec, M_TEMP);
			bvec = (struct buf **)
				malloc(bveccount * sizeof(struct buf *),
				       M_TEMP, M_NOWAIT);
			if (bvec == NULL) {
				bvec = bvec_on_stack;
				bvecsize = NFS_COMMITBVECSIZ;
			} else
				bvecsize = bveccount;
		} else {
			bvec = bvec_on_stack;
			bvecsize = NFS_COMMITBVECSIZ;
		}
		for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (bvecpos >= bvecsize)
				break;
			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
				!= (B_DELWRI | B_NEEDCOMMIT))
				continue;
			bremfree(bp);
			/*
			 * Work out if all buffers are using the same cred
			 * so we can deal with them all with one commit.
			 */
			if (wcred == NULL)
				wcred = bp->b_wcred;
			else if (wcred != bp->b_wcred)
				wcred = NOCRED;
			bp->b_flags |= (B_BUSY | B_WRITEINPROG);
			vfs_busy_pages(bp, 1);
			/*
			 * A list of these buffers is kept so that the
			 * second loop knows which buffers have actually
			 * been committed. This is necessary, since there
			 * may be a race between the commit rpc and new
			 * uncommitted writes on the file.
			 */
			bvec[bvecpos++] = bp;
			/* Grow [off, endoff) to cover this buffer's dirty
			 * byte range for the single combined commit. */
			toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
				bp->b_dirtyoff;
			if (toff < off)
				off = toff;
			toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
			if (toff > endoff)
				endoff = toff;
		}
		splx(s);
	}
	if (bvecpos > 0) {
		/*
		 * Commit data on the server, as required.
		 * If all bufs are using the same wcred, then use that with
		 * one call for all of them, otherwise commit each one
		 * separately.
		 */
		if (wcred != NOCRED)
			retv = nfs_commit(vp, off, (int)(endoff - off),
					  wcred, p);
		else {
			retv = 0;
			for (i = 0; i < bvecpos; i++) {
				off_t off, size;
				bp = bvec[i];
				off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
					bp->b_dirtyoff;
				size = (u_quad_t)(bp->b_dirtyend
						  - bp->b_dirtyoff);
				retv = nfs_commit(vp, off, (int)size,
						  bp->b_wcred, p);
				if (retv) break;
			}
		}

		/* Server rebooted: every uncommitted buffer is suspect. */
		if (retv == NFSERR_STALEWRITEVERF)
			nfs_clearcommit(vp->v_mount);
		/*
		 * Now, either mark the blocks I/O done or mark the
		 * blocks dirty, depending on whether the commit
		 * succeeded.
		 */
		for (i = 0; i < bvecpos; i++) {
			bp = bvec[i];
			bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG);
			if (retv) {
				vfs_unbusy_pages(bp);
				brelse(bp);
			} else {
				/* Commit succeeded: retire the buffer as if
				 * the write completed. */
				vp->v_numoutput++;
				bp->b_flags |= B_ASYNC;
				if (bp->b_flags & B_DELWRI) {
					--numdirtybuffers;
					if (needsbuffer) {
						vfs_bio_need_satisfy();
					}
				}
				s = splbio();	/* XXX check this positioning */
				bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				reassignbuf(bp, vp);
				splx(s);
				biodone(bp);
			}
		}
	}

	/*
	 * Start/do any write(s) that are required.
	 */
loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if (bp->b_flags & B_BUSY) {
			if (waitfor != MNT_WAIT || passone)
				continue;
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
				"nfsfsync", slptimeo);
			splx(s);
			if (error) {
				if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
					error = EINTR;
					goto done;
				}
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			goto loop;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("nfs_fsync: not dirty");
		/* Pass one (or a non-commit flush) leaves already-written,
		 * uncommitted buffers for the commit machinery above. */
		if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT))
			continue;
		bremfree(bp);
		if (passone || !commit)
			bp->b_flags |= (B_BUSY|B_ASYNC);
		else
			bp->b_flags |= (B_BUSY|B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT);
		splx(s);
		VOP_BWRITE(bp);
		goto loop;
	}
	splx(s);
	if (passone) {
		passone = 0;
		goto again;
	}
	if (waitfor == MNT_WAIT) {
		/* Wait for all in-flight writes to drain. */
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep((caddr_t)&vp->v_numoutput,
				slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
			if (error) {
			    if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
				error = EINTR;
				goto done;
			    }
			    if (slpflag == PCATCH) {
				slpflag = 0;
				slptimeo = 2 * hz;
			    }
			}
		}
		if (vp->v_dirtyblkhd.lh_first && commit) {
			goto loop;
		}
	}
	/* Report (and clear) any error recorded by an earlier async write. */
	if (np->n_flag & NWRITEERR) {
		error = np->n_error;
		np->n_flag &= ~NWRITEERR;
	}
done:
	if (bvec != NULL && bvec != bvec_on_stack)
		free(bvec, M_TEMP);
	return (error);
}

/*
 * NFS advisory byte-level locks.
 * Currently unsupported.
2925 */ 2926static int 2927nfs_advlock(ap) 2928 struct vop_advlock_args /* { 2929 struct vnode *a_vp; 2930 caddr_t a_id; 2931 int a_op; 2932 struct flock *a_fl; 2933 int a_flags; 2934 } */ *ap; 2935{ 2936 register struct nfsnode *np = VTONFS(ap->a_vp); 2937 2938 /* 2939 * The following kludge is to allow diskless support to work 2940 * until a real NFS lockd is implemented. Basically, just pretend 2941 * that this is a local lock. 2942 */ 2943 return (lf_advlock(ap, &(np->n_lockf), np->n_size)); 2944} 2945 2946/* 2947 * Print out the contents of an nfsnode. 2948 */ 2949static int 2950nfs_print(ap) 2951 struct vop_print_args /* { 2952 struct vnode *a_vp; 2953 } */ *ap; 2954{ 2955 register struct vnode *vp = ap->a_vp; 2956 register struct nfsnode *np = VTONFS(vp); 2957 2958 printf("tag VT_NFS, fileid %ld fsid 0x%lx", 2959 np->n_vattr.va_fileid, np->n_vattr.va_fsid); 2960 if (vp->v_type == VFIFO) 2961 fifo_printinfo(vp); 2962 printf("\n"); 2963 return (0); 2964} 2965 2966/* 2967 * Just call nfs_writebp() with the force argument set to 1. 2968 */ 2969static int 2970nfs_bwrite(ap) 2971 struct vop_bwrite_args /* { 2972 struct vnode *a_bp; 2973 } */ *ap; 2974{ 2975 2976 return (nfs_writebp(ap->a_bp, 1)); 2977} 2978 2979/* 2980 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless 2981 * the force flag is one and it also handles the B_NEEDCOMMIT flag. 
 */
int
nfs_writebp(bp, force)
	register struct buf *bp;
	int force;
{
	int s;
	register int oldflags = bp->b_flags, retv = 1;
	off_t off;

	if(!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	if (bp->b_flags & B_INVAL)
		bp->b_flags |= B_INVAL | B_NOCACHE;

	/* The buffer stops being delayed-write bookkeeping-wise here. */
	if (bp->b_flags & B_DELWRI) {
		--numdirtybuffers;
		if (needsbuffer)
			vfs_bio_need_satisfy();
	}
	s = splbio(); /* XXX check if needed */
	bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	curproc->p_stats->p_ru.ru_oublock++;
	splx(s);

	/*
	 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not
	 * an actual write will have to be scheduled via. VOP_STRATEGY().
	 * If B_WRITEINPROG is already set, then push it with a write anyhow.
	 */
	vfs_busy_pages(bp, 1);
	if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) {
		off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		bp->b_flags |= B_WRITEINPROG;
		retv = nfs_commit(bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
			bp->b_wcred, bp->b_proc);
		bp->b_flags &= ~B_WRITEINPROG;
		if (!retv) {
			/* Commit sufficed: buffer is clean and done. */
			bp->b_dirtyoff = bp->b_dirtyend = 0;
			bp->b_flags &= ~B_NEEDCOMMIT;
			biodone(bp);
		} else if (retv == NFSERR_STALEWRITEVERF)
			nfs_clearcommit(bp->b_vp->v_mount);
	}
	/* retv != 0 means the commit didn't (or couldn't) do the job. */
	if (retv) {
		if (force)
			bp->b_flags |= B_WRITEINPROG;
		VOP_STRATEGY(bp);
	}

	/* Synchronous caller: wait for i/o and return its status. */
	if( (oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			s = splbio();
			reassignbuf(bp, bp->b_vp);
			splx(s);
		}

		brelse(bp);
		return (rtval);
	}

	return (0);
}

/*
 * nfs special file access vnode op.
 * Essentially just get vattr and then imitate iaccess() since the device is
 * local to the client.
 */
static int
nfsspec_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vattr *vap;
	register gid_t *gp;
	register struct ucred *cred = ap->a_cred;
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;
	struct vattr vattr;
	register int i;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	/*
	 * If you're the super-user,
	 * you always get access.
	 */
	if (cred->cr_uid == 0)
		return (0);
	vap = &vattr;
	error = VOP_GETATTR(vp, vap, cred, ap->a_p);
	if (error)
		return (error);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group. If not a member of the
	 * group, then check public access.
	 *
	 * mode starts at the owner bits; each >>= 3 moves it down to
	 * the group and then the "other" permission bits.  A group
	 * match jumps to found so only the first >>= 3 applies.
	 */
	if (cred->cr_uid != vap->va_uid) {
		mode >>= 3;
		gp = cred->cr_groups;
		for (i = 0; i < cred->cr_ngroups; i++, gp++)
			if (vap->va_gid == *gp)
				goto found;
		mode >>= 3;
found:
		;
	}
	error = (vap->va_mode & mode) == mode ? 0 : EACCES;
	return (error);
}

/*
 * Read wrapper for special devices.
 */
static int
nfsspec_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * Set access flag.
	 * The cached atime is pushed back to the server on close.
	 */
	np->n_flag |= NACC;
	getnanotime(&np->n_atim);
	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
}

/*
 * Write wrapper for special devices.
 */
static int
nfsspec_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * Set update flag.
	 * The cached mtime is pushed back to the server on close.
	 */
	np->n_flag |= NUPD;
	getnanotime(&np->n_mtim);
	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
}

/*
 * Close wrapper for special devices.
 *
 * Update the times on the nfsnode then do device close.
 */
static int
nfsspec_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;

	if (np->n_flag & (NACC | NUPD)) {
		np->n_flag |= NCHG;
		/* Only flush times on the last close of a writable mount. */
		if (vp->v_usecount == 1 &&
		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			VATTR_NULL(&vattr);
			if (np->n_flag & NACC)
				vattr.va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vattr.va_mtime = np->n_mtim;
			(void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
		}
	}
	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Read wrapper for fifos.
 */
static int
nfsfifo_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * Set access flag.
	 */
	np->n_flag |= NACC;
	getnanotime(&np->n_atim);
	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
}

/*
 * Write wrapper for fifos.
3223 */ 3224static int 3225nfsfifo_write(ap) 3226 struct vop_write_args /* { 3227 struct vnode *a_vp; 3228 struct uio *a_uio; 3229 int a_ioflag; 3230 struct ucred *a_cred; 3231 } */ *ap; 3232{ 3233 register struct nfsnode *np = VTONFS(ap->a_vp); 3234 3235 /* 3236 * Set update flag. 3237 */ 3238 np->n_flag |= NUPD; 3239 getnanotime(&np->n_mtim); 3240 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); 3241} 3242 3243/* 3244 * Close wrapper for fifos. 3245 * 3246 * Update the times on the nfsnode then do fifo close. 3247 */ 3248static int 3249nfsfifo_close(ap) 3250 struct vop_close_args /* { 3251 struct vnode *a_vp; 3252 int a_fflag; 3253 struct ucred *a_cred; 3254 struct proc *a_p; 3255 } */ *ap; 3256{ 3257 register struct vnode *vp = ap->a_vp; 3258 register struct nfsnode *np = VTONFS(vp); 3259 struct vattr vattr; 3260 struct timespec ts; 3261 3262 if (np->n_flag & (NACC | NUPD)) { 3263 getnanotime(&ts); 3264 if (np->n_flag & NACC) 3265 np->n_atim = ts; 3266 if (np->n_flag & NUPD) 3267 np->n_mtim = ts; 3268 np->n_flag |= NCHG; 3269 if (vp->v_usecount == 1 && 3270 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3271 VATTR_NULL(&vattr); 3272 if (np->n_flag & NACC) 3273 vattr.va_atime = np->n_atim; 3274 if (np->n_flag & NUPD) 3275 vattr.va_mtime = np->n_mtim; 3276 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3277 } 3278 } 3279 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap)); 3280} 3281