nfs_vnops.c: diff between revision 46370 (deleted lines) and revision 46568 (added lines)
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $Id: nfs_vnops.c,v 1.125 1999/05/02 23:56:26 alc Exp $
37 * $Id: nfs_vnops.c,v 1.126 1999/05/03 20:59:14 alc Exp $
38 */
39
40
41/*
42 * vnode op calls for Sun NFS version 2 and 3
43 */
44
45#include "opt_inet.h"
46
47#include <sys/param.h>
48#include <sys/kernel.h>
49#include <sys/systm.h>
50#include <sys/resourcevar.h>
51#include <sys/proc.h>
52#include <sys/mount.h>
53#include <sys/buf.h>
54#include <sys/malloc.h>
55#include <sys/mbuf.h>
56#include <sys/namei.h>
57#include <sys/socket.h>
58#include <sys/vnode.h>
59#include <sys/dirent.h>
60#include <sys/fcntl.h>
61#include <sys/lockf.h>
62#include <sys/stat.h>
63#include <sys/sysctl.h>
64
65#include <vm/vm.h>
66#include <vm/vm_extern.h>
67#include <vm/vm_zone.h>
68
69#include <miscfs/fifofs/fifo.h>
70#include <miscfs/specfs/specdev.h>
71
72#include <nfs/rpcv2.h>
73#include <nfs/nfsproto.h>
74#include <nfs/nfs.h>
75#include <nfs/nfsnode.h>
76#include <nfs/nfsmount.h>
77#include <nfs/xdr_subs.h>
78#include <nfs/nfsm_subs.h>
79#include <nfs/nqnfs.h>
80
81#include <net/if.h>
82#include <netinet/in.h>
83#include <netinet/in_var.h>
84
85/* Defs */
86#define TRUE 1
87#define FALSE 0
88
89/*
90 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
91 * calls are not in getblk() and brelse() so that they would not be necessary
92 * here.
93 */
94#ifndef B_VMIO
95#define vfs_busy_pages(bp, f)
96#endif
97
98static int nfsspec_read __P((struct vop_read_args *));
99static int nfsspec_write __P((struct vop_write_args *));
100static int nfsfifo_read __P((struct vop_read_args *));
101static int nfsfifo_write __P((struct vop_write_args *));
102static int nfsspec_close __P((struct vop_close_args *));
103static int nfsfifo_close __P((struct vop_close_args *));
104#define nfs_poll vop_nopoll
105static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int));
106static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *));
107static int nfs_lookup __P((struct vop_lookup_args *));
108static int nfs_create __P((struct vop_create_args *));
109static int nfs_mknod __P((struct vop_mknod_args *));
110static int nfs_open __P((struct vop_open_args *));
111static int nfs_close __P((struct vop_close_args *));
112static int nfs_access __P((struct vop_access_args *));
113static int nfs_getattr __P((struct vop_getattr_args *));
114static int nfs_setattr __P((struct vop_setattr_args *));
115static int nfs_read __P((struct vop_read_args *));
116static int nfs_mmap __P((struct vop_mmap_args *));
117static int nfs_fsync __P((struct vop_fsync_args *));
118static int nfs_remove __P((struct vop_remove_args *));
119static int nfs_link __P((struct vop_link_args *));
120static int nfs_rename __P((struct vop_rename_args *));
121static int nfs_mkdir __P((struct vop_mkdir_args *));
122static int nfs_rmdir __P((struct vop_rmdir_args *));
123static int nfs_symlink __P((struct vop_symlink_args *));
124static int nfs_readdir __P((struct vop_readdir_args *));
125static int nfs_bmap __P((struct vop_bmap_args *));
126static int nfs_strategy __P((struct vop_strategy_args *));
127static int nfs_lookitup __P((struct vnode *, const char *, int,
128 struct ucred *, struct proc *, struct nfsnode **));
129static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *));
130static int nfsspec_access __P((struct vop_access_args *));
131static int nfs_readlink __P((struct vop_readlink_args *));
132static int nfs_print __P((struct vop_print_args *));
133static int nfs_advlock __P((struct vop_advlock_args *));
134static int nfs_bwrite __P((struct vop_bwrite_args *));
135/*
136 * Global vfs data structures for nfs
137 */
138vop_t **nfsv2_vnodeop_p;
139static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
140 { &vop_default_desc, (vop_t *) vop_defaultop },
141 { &vop_abortop_desc, (vop_t *) nfs_abortop },
142 { &vop_access_desc, (vop_t *) nfs_access },
143 { &vop_advlock_desc, (vop_t *) nfs_advlock },
144 { &vop_bmap_desc, (vop_t *) nfs_bmap },
145 { &vop_bwrite_desc, (vop_t *) nfs_bwrite },
146 { &vop_close_desc, (vop_t *) nfs_close },
147 { &vop_create_desc, (vop_t *) nfs_create },
148 { &vop_fsync_desc, (vop_t *) nfs_fsync },
149 { &vop_getattr_desc, (vop_t *) nfs_getattr },
150 { &vop_getpages_desc, (vop_t *) nfs_getpages },
151 { &vop_putpages_desc, (vop_t *) nfs_putpages },
152 { &vop_inactive_desc, (vop_t *) nfs_inactive },
153 { &vop_lease_desc, (vop_t *) vop_null },
154 { &vop_link_desc, (vop_t *) nfs_link },
155 { &vop_lock_desc, (vop_t *) vop_sharedlock },
156 { &vop_lookup_desc, (vop_t *) nfs_lookup },
157 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
158 { &vop_mknod_desc, (vop_t *) nfs_mknod },
159 { &vop_mmap_desc, (vop_t *) nfs_mmap },
160 { &vop_open_desc, (vop_t *) nfs_open },
161 { &vop_poll_desc, (vop_t *) nfs_poll },
162 { &vop_print_desc, (vop_t *) nfs_print },
163 { &vop_read_desc, (vop_t *) nfs_read },
164 { &vop_readdir_desc, (vop_t *) nfs_readdir },
165 { &vop_readlink_desc, (vop_t *) nfs_readlink },
166 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
167 { &vop_remove_desc, (vop_t *) nfs_remove },
168 { &vop_rename_desc, (vop_t *) nfs_rename },
169 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
170 { &vop_setattr_desc, (vop_t *) nfs_setattr },
171 { &vop_strategy_desc, (vop_t *) nfs_strategy },
172 { &vop_symlink_desc, (vop_t *) nfs_symlink },
173 { &vop_write_desc, (vop_t *) nfs_write },
174 { NULL, NULL }
175};
176static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
177 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
178VNODEOP_SET(nfsv2_vnodeop_opv_desc);
179
180/*
181 * Special device vnode ops
182 */
183vop_t **spec_nfsv2nodeop_p;
184static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
185 { &vop_default_desc, (vop_t *) spec_vnoperate },
186 { &vop_access_desc, (vop_t *) nfsspec_access },
187 { &vop_close_desc, (vop_t *) nfsspec_close },
188 { &vop_fsync_desc, (vop_t *) nfs_fsync },
189 { &vop_getattr_desc, (vop_t *) nfs_getattr },
190 { &vop_inactive_desc, (vop_t *) nfs_inactive },
191 { &vop_lock_desc, (vop_t *) vop_sharedlock },
192 { &vop_print_desc, (vop_t *) nfs_print },
193 { &vop_read_desc, (vop_t *) nfsspec_read },
194 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
195 { &vop_setattr_desc, (vop_t *) nfs_setattr },
196 { &vop_write_desc, (vop_t *) nfsspec_write },
197 { NULL, NULL }
198};
199static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
200 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
201VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
202
203vop_t **fifo_nfsv2nodeop_p;
204static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
205 { &vop_default_desc, (vop_t *) fifo_vnoperate },
206 { &vop_access_desc, (vop_t *) nfsspec_access },
207 { &vop_close_desc, (vop_t *) nfsfifo_close },
208 { &vop_fsync_desc, (vop_t *) nfs_fsync },
209 { &vop_getattr_desc, (vop_t *) nfs_getattr },
210 { &vop_inactive_desc, (vop_t *) nfs_inactive },
211 { &vop_lock_desc, (vop_t *) vop_sharedlock },
212 { &vop_print_desc, (vop_t *) nfs_print },
213 { &vop_read_desc, (vop_t *) nfsfifo_read },
214 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
215 { &vop_setattr_desc, (vop_t *) nfs_setattr },
216 { &vop_write_desc, (vop_t *) nfsfifo_write },
217 { NULL, NULL }
218};
219static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
220 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
221VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
222
223static int nfs_commit __P((struct vnode *vp, u_quad_t offset, int cnt,
224 struct ucred *cred, struct proc *procp));
225static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp,
226 struct componentname *cnp,
227 struct vattr *vap));
228static int nfs_removerpc __P((struct vnode *dvp, const char *name,
229 int namelen,
230 struct ucred *cred, struct proc *proc));
231static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr,
232 int fnamelen, struct vnode *tdvp,
233 const char *tnameptr, int tnamelen,
234 struct ucred *cred, struct proc *proc));
235static int nfs_renameit __P((struct vnode *sdvp,
236 struct componentname *scnp,
237 struct sillyrename *sp));
238
239/*
240 * Global variables
241 */
242extern u_int32_t nfs_true, nfs_false;
243extern u_int32_t nfs_xdrneg1;
244extern struct nfsstats nfsstats;
245extern nfstype nfsv3_type[9];
246struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
247struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
248int nfs_numasync = 0;
249#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
250
251SYSCTL_DECL(_vfs_nfs);
252
253static int nfsaccess_cache_timeout = 2;
254SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
255 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
256
257static int nfsaccess_cache_hits;
258SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
259 &nfsaccess_cache_hits, 0, "NFS ACCESS cache hit count");
260
261static int nfsaccess_cache_fills;
262SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_fills, CTLFLAG_RD,
263 &nfsaccess_cache_fills, 0, "NFS ACCESS cache fill count");
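/*
 * A usage sketch for the knobs above, assuming the stock sysctl(8)
 * utility (values are illustrative only):
 *
 *	sysctl -w vfs.nfs.access_cache_timeout=10	# cache ACCESS replies for 10s
 *	sysctl vfs.nfs.access_cache_hits		# inspect hit counter
 *	sysctl vfs.nfs.access_cache_fills		# inspect fill counter
 *
 * Setting the timeout to 0 effectively disables the cache; nfs_access()
 * below then requests only the bits it currently needs rather than the
 * blanket wmode, and never records a fill.
 */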
264
265/*
266 * nfs access vnode op.
267 * For nfs version 2, just return ok. File accesses may fail later.
268 * For nfs version 3, use the access rpc to check accessibility. If file modes
269 * are changed on the server, accesses might still fail later.
270 */
271static int
272nfs_access(ap)
273 struct vop_access_args /* {
274 struct vnode *a_vp;
275 int a_mode;
276 struct ucred *a_cred;
277 struct proc *a_p;
278 } */ *ap;
279{
280 register struct vnode *vp = ap->a_vp;
281 register u_int32_t *tl;
282 register caddr_t cp;
283 register int32_t t1, t2;
284 caddr_t bpos, dpos, cp2;
285 int error = 0, attrflag;
286 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
287 u_int32_t mode, rmode, wmode;
288 int v3 = NFS_ISV3(vp);
289 struct nfsnode *np = VTONFS(vp);
290
291 /*
292 * Disallow write attempts on filesystems mounted read-only;
293 * unless the file is a socket, fifo, or a block or character
294 * device resident on the filesystem.
295 */
296 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
297 switch (vp->v_type) {
298 case VREG:
299 case VDIR:
300 case VLNK:
301 return (EROFS);
302 default:
303 break;
304 }
305 }
306 /*
307 * For nfs v3, check to see if we have done this recently, and if
308 * so return our cached result instead of making an ACCESS call.
 309 * If not, do an access rpc. For nfs version 2 you are stuck emulating
 310 * ufs_access() locally using the vattr. This may not be correct,
311 * since the server may apply other access criteria such as
312 * client uid-->server uid mapping that we do not know about.
313 */
314 if (v3) {
315 if (ap->a_mode & VREAD)
316 mode = NFSV3ACCESS_READ;
317 else
318 mode = 0;
319 if (vp->v_type != VDIR) {
320 if (ap->a_mode & VWRITE)
321 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
322 if (ap->a_mode & VEXEC)
323 mode |= NFSV3ACCESS_EXECUTE;
324 } else {
325 if (ap->a_mode & VWRITE)
326 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
327 NFSV3ACCESS_DELETE);
328 if (ap->a_mode & VEXEC)
329 mode |= NFSV3ACCESS_LOOKUP;
330 }
331 /* XXX safety belt, only make blanket request if caching */
332 if (nfsaccess_cache_timeout > 0) {
333 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
334 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
335 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
336 } else {
337 wmode = mode;
338 }
339
340 /*
341 * Does our cached result allow us to give a definite yes to
342 * this request?
343 */
344 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
345 (ap->a_cred->cr_uid == np->n_modeuid) &&
346 ((np->n_mode & mode) == mode)) {
347 nfsaccess_cache_hits++;
348 } else {
349 /*
350 * Either a no, or a don't know. Go to the wire.
351 */
352 nfsstats.rpccnt[NFSPROC_ACCESS]++;
353 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
354 nfsm_fhtom(vp, v3);
355 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
356 *tl = txdr_unsigned(wmode);
357 nfsm_request(vp, NFSPROC_ACCESS, ap->a_p, ap->a_cred);
358 nfsm_postop_attr(vp, attrflag);
359 if (!error) {
360 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
361 rmode = fxdr_unsigned(u_int32_t, *tl);
362 /*
363 * The NFS V3 spec does not clarify whether or not
364 * the returned access bits can be a superset of
365 * the ones requested, so...
366 */
367 if ((rmode & mode) != mode) {
368 error = EACCES;
369 } else if (nfsaccess_cache_timeout > 0) {
370 /* cache the result */
371 nfsaccess_cache_fills++;
372 np->n_mode = rmode;
373 np->n_modeuid = ap->a_cred->cr_uid;
374 np->n_modestamp = time_second;
375 }
376 }
377 nfsm_reqdone;
378 }
379 return (error);
380 } else {
381 if ((error = nfsspec_access(ap)) != 0)
382 return (error);
383
384 /*
385 * Attempt to prevent a mapped root from accessing a file
386 * which it shouldn't. We try to read a byte from the file
387 * if the user is root and the file is not zero length.
388 * After calling nfsspec_access, we should have the correct
389 * file size cached.
390 */
391 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
392 && VTONFS(vp)->n_size > 0) {
393 struct iovec aiov;
394 struct uio auio;
395 char buf[1];
396
397 aiov.iov_base = buf;
398 aiov.iov_len = 1;
399 auio.uio_iov = &aiov;
400 auio.uio_iovcnt = 1;
401 auio.uio_offset = 0;
402 auio.uio_resid = 1;
403 auio.uio_segflg = UIO_SYSSPACE;
404 auio.uio_rw = UIO_READ;
405 auio.uio_procp = ap->a_p;
406
407 if (vp->v_type == VREG)
408 error = nfs_readrpc(vp, &auio, ap->a_cred);
409 else if (vp->v_type == VDIR) {
410 char* bp;
411 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
412 aiov.iov_base = bp;
413 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
414 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
415 free(bp, M_TEMP);
416 } else if (vp->v_type == VLNK)
417 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
418 else
419 error = EACCES;
420 }
421 return (error);
422 }
423}
424
425/*
426 * nfs open vnode op
427 * Check to see if the type is ok
428 * and that deletion is not in progress.
429 * For paged in text files, you will need to flush the page cache
430 * if consistency is lost.
431 */
432/* ARGSUSED */
433static int
434nfs_open(ap)
435 struct vop_open_args /* {
436 struct vnode *a_vp;
437 int a_mode;
438 struct ucred *a_cred;
439 struct proc *a_p;
440 } */ *ap;
441{
442 register struct vnode *vp = ap->a_vp;
443 struct nfsnode *np = VTONFS(vp);
444 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
445 struct vattr vattr;
446 int error;
447
448 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
449#ifdef DIAGNOSTIC
450 printf("open eacces vtyp=%d\n",vp->v_type);
451#endif
452 return (EACCES);
453 }
454 /*
455 * Get a valid lease. If cached data is stale, flush it.
456 */
457 if (nmp->nm_flag & NFSMNT_NQNFS) {
458 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
459 do {
460 error = nqnfs_getlease(vp, ND_READ, ap->a_cred,
461 ap->a_p);
462 } while (error == NQNFS_EXPIRED);
463 if (error)
464 return (error);
465 if (np->n_lrev != np->n_brev ||
466 (np->n_flag & NQNFSNONCACHE)) {
467 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
468 ap->a_p, 1)) == EINTR)
469 return (error);
470 np->n_brev = np->n_lrev;
471 }
472 }
473 } else {
474 if (np->n_flag & NMODIFIED) {
475 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
476 ap->a_p, 1)) == EINTR)
477 return (error);
478 np->n_attrstamp = 0;
479 if (vp->v_type == VDIR)
480 np->n_direofoffset = 0;
481 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
482 if (error)
483 return (error);
484 np->n_mtime = vattr.va_mtime.tv_sec;
485 } else {
486 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
487 if (error)
488 return (error);
489 if (np->n_mtime != vattr.va_mtime.tv_sec) {
490 if (vp->v_type == VDIR)
491 np->n_direofoffset = 0;
492 if ((error = nfs_vinvalbuf(vp, V_SAVE,
493 ap->a_cred, ap->a_p, 1)) == EINTR)
494 return (error);
495 np->n_mtime = vattr.va_mtime.tv_sec;
496 }
497 }
498 }
499 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
500 np->n_attrstamp = 0; /* For Open/Close consistency */
501 return (0);
502}
503
504/*
505 * nfs close vnode op
506 * What an NFS client should do upon close after writing is a debatable issue.
507 * Most NFS clients push delayed writes to the server upon close, basically for
508 * two reasons:
509 * 1 - So that any write errors may be reported back to the client process
510 * doing the close system call. By far the two most likely errors are
511 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
512 * 2 - To put a worst case upper bound on cache inconsistency between
513 * multiple clients for the file.
514 * There is also a consistency problem for Version 2 of the protocol w.r.t.
515 * not being able to tell if other clients are writing a file concurrently,
516 * since there is no way of knowing if the changed modify time in the reply
517 * is only due to the write for this client.
518 * (NFS Version 3 provides weak cache consistency data in the reply that
519 * should be sufficient to detect and handle this case.)
520 *
521 * The current code does the following:
522 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
523 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
524 * or commit them (this satisfies 1 and 2 except for the
525 * case where the server crashes after this close but
526 * before the commit RPC, which is felt to be "good
 527 * enough"). Changing the last argument to nfs_flush() to
528 * a 1 would force a commit operation, if it is felt a
529 * commit is necessary now.
530 * for NQNFS - do nothing now, since 2 is dealt with via leases and
531 * 1 should be dealt with via an fsync() system call for
532 * cases where write errors are important.
533 */
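/*
 * A minimal sketch of the forced-commit variant mentioned above: the
 * nfs_flush() call in nfs_close() below would change from
 *
 *	error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0);
 * to
 *	error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 1);
 *
 * trading an extra commit per close for protection against the server
 * crashing between this close and a later commit RPC.
 */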
534/* ARGSUSED */
535static int
536nfs_close(ap)
537 struct vop_close_args /* {
538 struct vnodeop_desc *a_desc;
539 struct vnode *a_vp;
540 int a_fflag;
541 struct ucred *a_cred;
542 struct proc *a_p;
543 } */ *ap;
544{
545 register struct vnode *vp = ap->a_vp;
546 register struct nfsnode *np = VTONFS(vp);
547 int error = 0;
548
549 if (vp->v_type == VREG) {
550 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
551 (np->n_flag & NMODIFIED)) {
552 if (NFS_ISV3(vp)) {
553 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0);
554 np->n_flag &= ~NMODIFIED;
555 } else
556 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
557 np->n_attrstamp = 0;
558 }
559 if (np->n_flag & NWRITEERR) {
560 np->n_flag &= ~NWRITEERR;
561 error = np->n_error;
562 }
563 }
564 return (error);
565}
566
567/*
568 * nfs getattr call from vfs.
569 */
570static int
571nfs_getattr(ap)
572 struct vop_getattr_args /* {
573 struct vnode *a_vp;
574 struct vattr *a_vap;
575 struct ucred *a_cred;
576 struct proc *a_p;
577 } */ *ap;
578{
579 register struct vnode *vp = ap->a_vp;
580 register struct nfsnode *np = VTONFS(vp);
581 register caddr_t cp;
582 register u_int32_t *tl;
583 register int32_t t1, t2;
584 caddr_t bpos, dpos;
585 int error = 0;
586 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
587 int v3 = NFS_ISV3(vp);
588
589 /*
590 * Update local times for special files.
591 */
592 if (np->n_flag & (NACC | NUPD))
593 np->n_flag |= NCHG;
594 /*
595 * First look in the cache.
596 */
597 if (nfs_getattrcache(vp, ap->a_vap) == 0)
598 return (0);
599 nfsstats.rpccnt[NFSPROC_GETATTR]++;
600 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
601 nfsm_fhtom(vp, v3);
602 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred);
603 if (!error) {
604 nfsm_loadattr(vp, ap->a_vap);
605 }
606 nfsm_reqdone;
607 return (error);
608}
609
610/*
611 * nfs setattr call.
612 */
613static int
614nfs_setattr(ap)
615 struct vop_setattr_args /* {
616 struct vnodeop_desc *a_desc;
617 struct vnode *a_vp;
618 struct vattr *a_vap;
619 struct ucred *a_cred;
620 struct proc *a_p;
621 } */ *ap;
622{
623 register struct vnode *vp = ap->a_vp;
624 register struct nfsnode *np = VTONFS(vp);
625 register struct vattr *vap = ap->a_vap;
626 int error = 0;
627 u_quad_t tsize;
628
629#ifndef nolint
630 tsize = (u_quad_t)0;
631#endif
632
633 /*
634 * Setting of flags is not supported.
635 */
636 if (vap->va_flags != VNOVAL)
637 return (EOPNOTSUPP);
638
639 /*
640 * Disallow write attempts if the filesystem is mounted read-only.
641 */
642 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
643 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
644 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
645 (vp->v_mount->mnt_flag & MNT_RDONLY))
646 return (EROFS);
647 if (vap->va_size != VNOVAL) {
648 switch (vp->v_type) {
649 case VDIR:
650 return (EISDIR);
651 case VCHR:
652 case VBLK:
653 case VSOCK:
654 case VFIFO:
655 if (vap->va_mtime.tv_sec == VNOVAL &&
656 vap->va_atime.tv_sec == VNOVAL &&
657 vap->va_mode == (mode_t)VNOVAL &&
658 vap->va_uid == (uid_t)VNOVAL &&
659 vap->va_gid == (gid_t)VNOVAL)
660 return (0);
661 vap->va_size = VNOVAL;
662 break;
663 default:
664 /*
665 * Disallow write attempts if the filesystem is
666 * mounted read-only.
667 */
668 if (vp->v_mount->mnt_flag & MNT_RDONLY)
669 return (EROFS);
670 vnode_pager_setsize(vp, vap->va_size);
671 if (np->n_flag & NMODIFIED) {
672 if (vap->va_size == 0)
673 error = nfs_vinvalbuf(vp, 0,
674 ap->a_cred, ap->a_p, 1);
675 else
676 error = nfs_vinvalbuf(vp, V_SAVE,
677 ap->a_cred, ap->a_p, 1);
678 if (error) {
679 vnode_pager_setsize(vp, np->n_size);
680 return (error);
681 }
682 }
683 tsize = np->n_size;
684 np->n_size = np->n_vattr.va_size = vap->va_size;
685 };
686 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
687 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
688 vp->v_type == VREG &&
689 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
690 ap->a_p, 1)) == EINTR)
691 return (error);
692 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
693 if (error && vap->va_size != VNOVAL) {
694 np->n_size = np->n_vattr.va_size = tsize;
695 vnode_pager_setsize(vp, np->n_size);
696 }
697 return (error);
698}
699
700/*
701 * Do an nfs setattr rpc.
702 */
703static int
704nfs_setattrrpc(vp, vap, cred, procp)
705 register struct vnode *vp;
706 register struct vattr *vap;
707 struct ucred *cred;
708 struct proc *procp;
709{
710 register struct nfsv2_sattr *sp;
711 register caddr_t cp;
712 register int32_t t1, t2;
713 caddr_t bpos, dpos, cp2;
714 u_int32_t *tl;
715 int error = 0, wccflag = NFSV3_WCCRATTR;
716 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
717 int v3 = NFS_ISV3(vp);
718
719 nfsstats.rpccnt[NFSPROC_SETATTR]++;
720 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
721 nfsm_fhtom(vp, v3);
722 if (v3) {
723 nfsm_v3attrbuild(vap, TRUE);
724 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
725 *tl = nfs_false;
726 } else {
727 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
728 if (vap->va_mode == (mode_t)VNOVAL)
729 sp->sa_mode = nfs_xdrneg1;
730 else
731 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
732 if (vap->va_uid == (uid_t)VNOVAL)
733 sp->sa_uid = nfs_xdrneg1;
734 else
735 sp->sa_uid = txdr_unsigned(vap->va_uid);
736 if (vap->va_gid == (gid_t)VNOVAL)
737 sp->sa_gid = nfs_xdrneg1;
738 else
739 sp->sa_gid = txdr_unsigned(vap->va_gid);
740 sp->sa_size = txdr_unsigned(vap->va_size);
741 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
742 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
743 }
744 nfsm_request(vp, NFSPROC_SETATTR, procp, cred);
745 if (v3) {
746 nfsm_wcc_data(vp, wccflag);
747 } else
748 nfsm_loadattr(vp, (struct vattr *)0);
749 nfsm_reqdone;
750 return (error);
751}
752
753/*
754 * nfs lookup call, one step at a time...
755 * First look in cache
756 * If not found, unlock the directory nfsnode and do the rpc
757 */
758static int
759nfs_lookup(ap)
760 struct vop_lookup_args /* {
761 struct vnodeop_desc *a_desc;
762 struct vnode *a_dvp;
763 struct vnode **a_vpp;
764 struct componentname *a_cnp;
765 } */ *ap;
766{
767 struct componentname *cnp = ap->a_cnp;
768 struct vnode *dvp = ap->a_dvp;
769 struct vnode **vpp = ap->a_vpp;
770 int flags = cnp->cn_flags;
771 struct vnode *newvp;
772 u_int32_t *tl;
773 caddr_t cp;
774 int32_t t1, t2;
775 struct nfsmount *nmp;
776 caddr_t bpos, dpos, cp2;
777 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
778 long len;
779 nfsfh_t *fhp;
780 struct nfsnode *np;
781 int lockparent, wantparent, error = 0, attrflag, fhsize;
782 int v3 = NFS_ISV3(dvp);
783 struct proc *p = cnp->cn_proc;
784
785 *vpp = NULLVP;
786 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
787 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
788 return (EROFS);
789 if (dvp->v_type != VDIR)
790 return (ENOTDIR);
791 lockparent = flags & LOCKPARENT;
792 wantparent = flags & (LOCKPARENT|WANTPARENT);
793 nmp = VFSTONFS(dvp->v_mount);
794 np = VTONFS(dvp);
795 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
796 struct vattr vattr;
797 int vpid;
798
799 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) != 0) {
800 *vpp = NULLVP;
801 return (error);
802 }
803
804 newvp = *vpp;
805 vpid = newvp->v_id;
806 /*
807 * See the comment starting `Step through' in ufs/ufs_lookup.c
808 * for an explanation of the locking protocol
809 */
810 if (dvp == newvp) {
811 VREF(newvp);
812 error = 0;
813 } else if (flags & ISDOTDOT) {
814 VOP_UNLOCK(dvp, 0, p);
815 error = vget(newvp, LK_EXCLUSIVE, p);
816 if (!error && lockparent && (flags & ISLASTCN))
817 error = vn_lock(dvp, LK_EXCLUSIVE, p);
818 } else {
819 error = vget(newvp, LK_EXCLUSIVE, p);
820 if (!lockparent || error || !(flags & ISLASTCN))
821 VOP_UNLOCK(dvp, 0, p);
822 }
823 if (!error) {
824 if (vpid == newvp->v_id) {
825 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
826 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
827 nfsstats.lookupcache_hits++;
828 if (cnp->cn_nameiop != LOOKUP &&
829 (flags & ISLASTCN))
830 cnp->cn_flags |= SAVENAME;
831 return (0);
832 }
833 cache_purge(newvp);
834 }
835 vput(newvp);
836 if (lockparent && dvp != newvp && (flags & ISLASTCN))
837 VOP_UNLOCK(dvp, 0, p);
838 }
839 error = vn_lock(dvp, LK_EXCLUSIVE, p);
840 *vpp = NULLVP;
841 if (error)
842 return (error);
843 }
844 error = 0;
845 newvp = NULLVP;
846 nfsstats.lookupcache_misses++;
847 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
848 len = cnp->cn_namelen;
849 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
850 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
851 nfsm_fhtom(dvp, v3);
852 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
853 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred);
854 if (error) {
855 nfsm_postop_attr(dvp, attrflag);
856 m_freem(mrep);
857 goto nfsmout;
858 }
859 nfsm_getfh(fhp, fhsize, v3);
860
861 /*
862 * Handle RENAME case...
863 */
864 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
865 if (NFS_CMPFH(np, fhp, fhsize)) {
866 m_freem(mrep);
867 return (EISDIR);
868 }
869 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
870 if (error) {
871 m_freem(mrep);
872 return (error);
873 }
874 newvp = NFSTOV(np);
875 if (v3) {
876 nfsm_postop_attr(newvp, attrflag);
877 nfsm_postop_attr(dvp, attrflag);
878 } else
879 nfsm_loadattr(newvp, (struct vattr *)0);
880 *vpp = newvp;
881 m_freem(mrep);
882 cnp->cn_flags |= SAVENAME;
883 if (!lockparent)
884 VOP_UNLOCK(dvp, 0, p);
885 return (0);
886 }
887
888 if (flags & ISDOTDOT) {
889 VOP_UNLOCK(dvp, 0, p);
890 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
891 if (error) {
892 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
893 return (error);
894 }
895 newvp = NFSTOV(np);
896 if (lockparent && (flags & ISLASTCN) &&
897 (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
898 vput(newvp);
899 return (error);
900 }
901 } else if (NFS_CMPFH(np, fhp, fhsize)) {
902 VREF(dvp);
903 newvp = dvp;
904 } else {
905 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
906 if (error) {
907 m_freem(mrep);
908 return (error);
909 }
910 if (!lockparent || !(flags & ISLASTCN))
911 VOP_UNLOCK(dvp, 0, p);
912 newvp = NFSTOV(np);
913 }
914 if (v3) {
915 nfsm_postop_attr(newvp, attrflag);
916 nfsm_postop_attr(dvp, attrflag);
917 } else
918 nfsm_loadattr(newvp, (struct vattr *)0);
919 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
920 cnp->cn_flags |= SAVENAME;
921 if ((cnp->cn_flags & MAKEENTRY) &&
922 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
923 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
924 cache_enter(dvp, newvp, cnp);
925 }
926 *vpp = newvp;
927 nfsm_reqdone;
928 if (error) {
929 if (newvp != NULLVP) {
930 vrele(newvp);
931 *vpp = NULLVP;
932 }
933 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
934 (flags & ISLASTCN) && error == ENOENT) {
935 if (!lockparent)
936 VOP_UNLOCK(dvp, 0, p);
937 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
938 error = EROFS;
939 else
940 error = EJUSTRETURN;
941 }
942 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
943 cnp->cn_flags |= SAVENAME;
944 }
945 return (error);
946}
947
948/*
949 * nfs read call.
950 * Just call nfs_bioread() to do the work.
951 */
952static int
953nfs_read(ap)
954 struct vop_read_args /* {
955 struct vnode *a_vp;
956 struct uio *a_uio;
957 int a_ioflag;
958 struct ucred *a_cred;
959 } */ *ap;
960{
961 register struct vnode *vp = ap->a_vp;
962
963 if (vp->v_type != VREG)
964 return (EPERM);
965 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
966}
967
968/*
969 * nfs readlink call
970 */
971static int
972nfs_readlink(ap)
973 struct vop_readlink_args /* {
974 struct vnode *a_vp;
975 struct uio *a_uio;
976 struct ucred *a_cred;
977 } */ *ap;
978{
979 register struct vnode *vp = ap->a_vp;
980
981 if (vp->v_type != VLNK)
982 return (EINVAL);
983 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
984}
985
986/*
987 * Do a readlink rpc.
988 * Called by nfs_doio() from below the buffer cache.
989 */
990int
991nfs_readlinkrpc(vp, uiop, cred)
992 register struct vnode *vp;
993 struct uio *uiop;
994 struct ucred *cred;
995{
996 register u_int32_t *tl;
997 register caddr_t cp;
998 register int32_t t1, t2;
999 caddr_t bpos, dpos, cp2;
1000 int error = 0, len, attrflag;
1001 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1002 int v3 = NFS_ISV3(vp);
1003
1004 nfsstats.rpccnt[NFSPROC_READLINK]++;
1005 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1006 nfsm_fhtom(vp, v3);
1007 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred);
1008 if (v3)
1009 nfsm_postop_attr(vp, attrflag);
1010 if (!error) {
1011 nfsm_strsiz(len, NFS_MAXPATHLEN);
1012 nfsm_mtouio(uiop, len);
1013 }
1014 nfsm_reqdone;
1015 return (error);
1016}
1017
1018/*
1019 * nfs read rpc call
1020 * Ditto above
1021 */
1022int
1023nfs_readrpc(vp, uiop, cred)
1024 register struct vnode *vp;
1025 struct uio *uiop;
1026 struct ucred *cred;
1027{
1028 register u_int32_t *tl;
1029 register caddr_t cp;
1030 register int32_t t1, t2;
1031 caddr_t bpos, dpos, cp2;
1032 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1033 struct nfsmount *nmp;
1034 int error = 0, len, retlen, tsiz, eof, attrflag;
1035 int v3 = NFS_ISV3(vp);
1036
1037#ifndef nolint
1038 eof = 0;
1039#endif
1040 nmp = VFSTONFS(vp->v_mount);
1041 tsiz = uiop->uio_resid;
1042 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1043 return (EFBIG);
1044 while (tsiz > 0) {
1045 nfsstats.rpccnt[NFSPROC_READ]++;
1046 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1047 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1048 nfsm_fhtom(vp, v3);
1049 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1050 if (v3) {
1051 txdr_hyper(&uiop->uio_offset, tl);
1052 *(tl + 2) = txdr_unsigned(len);
1053 } else {
1054 *tl++ = txdr_unsigned(uiop->uio_offset);
1055 *tl++ = txdr_unsigned(len);
1056 *tl = 0;
1057 }
1058 nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred);
1059 if (v3) {
1060 nfsm_postop_attr(vp, attrflag);
1061 if (error) {
1062 m_freem(mrep);
1063 goto nfsmout;
1064 }
1065 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1066 eof = fxdr_unsigned(int, *(tl + 1));
1067 } else
1068 nfsm_loadattr(vp, (struct vattr *)0);
1069 nfsm_strsiz(retlen, nmp->nm_rsize);
1070 nfsm_mtouio(uiop, retlen);
1071 m_freem(mrep);
1072 tsiz -= retlen;
1073 if (v3) {
1074 if (eof || retlen == 0)
1075 tsiz = 0;
1076 } else if (retlen < len)
1077 tsiz = 0;
1078 }
1079nfsmout:
1080 return (error);
1081}
1082
1083/*
1084 * nfs write call
1085 */
1086int
1087nfs_writerpc(vp, uiop, cred, iomode, must_commit)
1088 register struct vnode *vp;
1089 register struct uio *uiop;
1090 struct ucred *cred;
1091 int *iomode, *must_commit;
1092{
1093 register u_int32_t *tl;
1094 register caddr_t cp;
1095 register int32_t t1, t2, backup;
1096 caddr_t bpos, dpos, cp2;
1097 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1098 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1099 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1100 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1101
1102#ifndef DIAGNOSTIC
1103 if (uiop->uio_iovcnt != 1)
1104 panic("nfs: writerpc iovcnt > 1");
1105#endif
1106 *must_commit = 0;
1107 tsiz = uiop->uio_resid;
1108 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1109 return (EFBIG);
1110 while (tsiz > 0) {
1111 nfsstats.rpccnt[NFSPROC_WRITE]++;
1112 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1113 nfsm_reqhead(vp, NFSPROC_WRITE,
1114 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1115 nfsm_fhtom(vp, v3);
1116 if (v3) {
1117 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1118 txdr_hyper(&uiop->uio_offset, tl);
1119 tl += 2;
1120 *tl++ = txdr_unsigned(len);
1121 *tl++ = txdr_unsigned(*iomode);
1122 *tl = txdr_unsigned(len);
1123 } else {
1124 register u_int32_t x;
1125
1126 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1127 /* Set both "begin" and "current" to non-garbage. */
1128 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1129 *tl++ = x; /* "begin offset" */
1130 *tl++ = x; /* "current offset" */
1131 x = txdr_unsigned(len);
1132 *tl++ = x; /* total to this offset */
1133 *tl = x; /* size of this write */
1134 }
1135 nfsm_uiotom(uiop, len);
1136 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred);
1137 if (v3) {
1138 wccflag = NFSV3_WCCCHK;
1139 nfsm_wcc_data(vp, wccflag);
1140 if (!error) {
1141 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1142 + NFSX_V3WRITEVERF);
1143 rlen = fxdr_unsigned(int, *tl++);
1144 if (rlen == 0) {
1145 error = NFSERR_IO;
1146 m_freem(mrep);
1147 break;
1148 } else if (rlen < len) {
1149 backup = len - rlen;
1150 uiop->uio_iov->iov_base -= backup;
1151 uiop->uio_iov->iov_len += backup;
1152 uiop->uio_offset -= backup;
1153 uiop->uio_resid += backup;
1154 len = rlen;
1155 }
1156 commit = fxdr_unsigned(int, *tl++);
1157
1158 /*
 1159 * Return the lowest commitment level
1160 * obtained by any of the RPCs.
1161 */
1162 if (committed == NFSV3WRITE_FILESYNC)
1163 committed = commit;
1164 else if (committed == NFSV3WRITE_DATASYNC &&
1165 commit == NFSV3WRITE_UNSTABLE)
1166 committed = commit;
1167 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1168 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1169 NFSX_V3WRITEVERF);
1170 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1171 } else if (bcmp((caddr_t)tl,
1172 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1173 *must_commit = 1;
1174 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1175 NFSX_V3WRITEVERF);
1176 }
1177 }
1178 } else
1179 nfsm_loadattr(vp, (struct vattr *)0);
1180 if (wccflag)
1181 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1182 m_freem(mrep);
1183 if (error)
1184 break;
1185 tsiz -= len;
1186 }
1187nfsmout:
1188 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1189 committed = NFSV3WRITE_FILESYNC;
1190 *iomode = committed;
1191 if (error)
1192 uiop->uio_resid = tsiz;
1193 return (error);
1194}
1195
1196/*
1197 * nfs mknod rpc
1198 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1199 * mode set to specify the file type and the size field for rdev.
1200 */
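/*
 * A sketch of the v2 encoding described above, with hypothetical values:
 * for a character device created with mode 0660, the !v3 branch below
 * ends up sending
 *
 *	sp->sa_mode = vtonfsv2_mode(VCHR, 0660);   IFMT bits carry the type
 *	sp->sa_size = txdr_unsigned(va_rdev);      size field carries rdev
 *
 * while the v3 branch issues a real MKNOD request carrying the file type
 * and the major/minor numbers as separate fields.
 */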
1201static int
1202nfs_mknodrpc(dvp, vpp, cnp, vap)
1203 register struct vnode *dvp;
1204 register struct vnode **vpp;
1205 register struct componentname *cnp;
1206 register struct vattr *vap;
1207{
1208 register struct nfsv2_sattr *sp;
1209 register u_int32_t *tl;
1210 register caddr_t cp;
1211 register int32_t t1, t2;
1212 struct vnode *newvp = (struct vnode *)0;
1213 struct nfsnode *np = (struct nfsnode *)0;
1214 struct vattr vattr;
1215 char *cp2;
1216 caddr_t bpos, dpos;
1217 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1218 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1219 u_int32_t rdev;
1220 int v3 = NFS_ISV3(dvp);
1221
1222 if (vap->va_type == VCHR || vap->va_type == VBLK)
1223 rdev = txdr_unsigned(vap->va_rdev);
1224 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1225 rdev = nfs_xdrneg1;
1226 else {
1227 VOP_ABORTOP(dvp, cnp);
1228 return (EOPNOTSUPP);
1229 }
1230 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1231 VOP_ABORTOP(dvp, cnp);
1232 return (error);
1233 }
1234 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1235 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1236 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1237 nfsm_fhtom(dvp, v3);
1238 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1239 if (v3) {
1240 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1241 *tl++ = vtonfsv3_type(vap->va_type);
1242 nfsm_v3attrbuild(vap, FALSE);
1243 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1244 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1245 *tl++ = txdr_unsigned(major(vap->va_rdev));
1246 *tl = txdr_unsigned(minor(vap->va_rdev));
1247 }
1248 } else {
1249 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1250 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1251 sp->sa_uid = nfs_xdrneg1;
1252 sp->sa_gid = nfs_xdrneg1;
1253 sp->sa_size = rdev;
1254 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1255 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1256 }
1257 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred);
1258 if (!error) {
1259 nfsm_mtofh(dvp, newvp, v3, gotvp);
1260 if (!gotvp) {
1261 if (newvp) {
1262 vput(newvp);
1263 newvp = (struct vnode *)0;
1264 }
1265 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1266 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1267 if (!error)
1268 newvp = NFSTOV(np);
1269 }
1270 }
1271 if (v3)
1272 nfsm_wcc_data(dvp, wccflag);
1273 nfsm_reqdone;
1274 if (error) {
1275 if (newvp)
1276 vput(newvp);
1277 } else {
1278 if (cnp->cn_flags & MAKEENTRY)
1279 cache_enter(dvp, newvp, cnp);
1280 *vpp = newvp;
1281 }
1282 zfree(namei_zone, cnp->cn_pnbuf);
1283 VTONFS(dvp)->n_flag |= NMODIFIED;
1284 if (!wccflag)
1285 VTONFS(dvp)->n_attrstamp = 0;
1286 return (error);
1287}
1288
1289/*
1290 * nfs mknod vop
1291 * just call nfs_mknodrpc() to do the work.
1292 */
1293/* ARGSUSED */
1294static int
1295nfs_mknod(ap)
1296 struct vop_mknod_args /* {
1297 struct vnode *a_dvp;
1298 struct vnode **a_vpp;
1299 struct componentname *a_cnp;
1300 struct vattr *a_vap;
1301 } */ *ap;
1302{
1303 struct vnode *newvp;
1304 int error;
1305
1306 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap);
1307 if (!error)
1308 vput(newvp);
1309 return (error);
1310}
1311
1312static u_long create_verf;
1313/*
1314 * nfs file create call
1315 */
1316static int
1317nfs_create(ap)
1318 struct vop_create_args /* {
1319 struct vnode *a_dvp;
1320 struct vnode **a_vpp;
1321 struct componentname *a_cnp;
1322 struct vattr *a_vap;
1323 } */ *ap;
1324{
1325 register struct vnode *dvp = ap->a_dvp;
1326 register struct vattr *vap = ap->a_vap;
1327 register struct componentname *cnp = ap->a_cnp;
1328 register struct nfsv2_sattr *sp;
1329 register u_int32_t *tl;
1330 register caddr_t cp;
1331 register int32_t t1, t2;
1332 struct nfsnode *np = (struct nfsnode *)0;
1333 struct vnode *newvp = (struct vnode *)0;
1334 caddr_t bpos, dpos, cp2;
1335 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1336 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1337 struct vattr vattr;
1338 int v3 = NFS_ISV3(dvp);
1339
1340 /*
1341 * Oops, not for me..
1342 */
1343 if (vap->va_type == VSOCK)
1344 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1345
1346 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1347 VOP_ABORTOP(dvp, cnp);
1348 return (error);
1349 }
1350 if (vap->va_vaflags & VA_EXCLUSIVE)
1351 fmode |= O_EXCL;
1352again:
1353 nfsstats.rpccnt[NFSPROC_CREATE]++;
1354 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1355 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1356 nfsm_fhtom(dvp, v3);
1357 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1358 if (v3) {
1359 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1360 if (fmode & O_EXCL) {
1361 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1362 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1363#ifdef INET
1364 if (!TAILQ_EMPTY(&in_ifaddrhead))
1365 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
1366 else
1367#endif
1368 *tl++ = create_verf;
1369 *tl = ++create_verf;
1370 } else {
1371 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1372 nfsm_v3attrbuild(vap, FALSE);
1373 }
1374 } else {
1375 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1376 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1377 sp->sa_uid = nfs_xdrneg1;
1378 sp->sa_gid = nfs_xdrneg1;
1379 sp->sa_size = 0;
1380 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1381 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1382 }
1383 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred);
1384 if (!error) {
1385 nfsm_mtofh(dvp, newvp, v3, gotvp);
1386 if (!gotvp) {
1387 if (newvp) {
1388 vput(newvp);
1389 newvp = (struct vnode *)0;
1390 }
1391 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1392 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1393 if (!error)
1394 newvp = NFSTOV(np);
1395 }
1396 }
1397 if (v3)
1398 nfsm_wcc_data(dvp, wccflag);
1399 nfsm_reqdone;
1400 if (error) {
1401 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1402 fmode &= ~O_EXCL;
1403 goto again;
1404 }
1405 if (newvp)
1406 vput(newvp);
1407 } else if (v3 && (fmode & O_EXCL))
1408 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
1409 if (!error) {
1410 if (cnp->cn_flags & MAKEENTRY)
1411 cache_enter(dvp, newvp, cnp);
1412 *ap->a_vpp = newvp;
1413 }
1414 if (error || (cnp->cn_flags & SAVESTART) == 0)
1415 zfree(namei_zone, cnp->cn_pnbuf);
1416 VTONFS(dvp)->n_flag |= NMODIFIED;
1417 if (!wccflag)
1418 VTONFS(dvp)->n_attrstamp = 0;
1419 return (error);
1420}
1421
1422/*
1423 * nfs file remove call
1424 * To try and make nfs semantics closer to ufs semantics, a file that has
1425 * other processes using the vnode is renamed instead of removed and then
1426 * removed later on the last close.
1427 * - If v_usecount > 1
1428 * If a rename is not already in the works
1429 * call nfs_sillyrename() to set it up
1430 * else
1431 * do the remove rpc
1432 */
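/*
 * What a sillyrename amounts to, roughly sketched: the file is renamed
 * on the server to a temporary name (commonly of the .nfsXXXX form)
 * recorded in the sillyrename structure, and nfs_inactive() later calls
 * nfs_removeit() below to issue the real remove once the last reference
 * goes away.
 */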
1433static int
1434nfs_remove(ap)
1435 struct vop_remove_args /* {
1436 struct vnodeop_desc *a_desc;
1437 struct vnode * a_dvp;
1438 struct vnode * a_vp;
1439 struct componentname * a_cnp;
1440 } */ *ap;
1441{
1442 register struct vnode *vp = ap->a_vp;
1443 register struct vnode *dvp = ap->a_dvp;
1444 register struct componentname *cnp = ap->a_cnp;
1445 register struct nfsnode *np = VTONFS(vp);
1446 int error = 0;
1447 struct vattr vattr;
1448
1449#ifndef DIAGNOSTIC
1450 if ((cnp->cn_flags & HASBUF) == 0)
1451 panic("nfs_remove: no name");
1452 if (vp->v_usecount < 1)
1453 panic("nfs_remove: bad v_usecount");
1454#endif
1455 if (vp->v_type == VDIR)
1456 error = EPERM;
1457 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1458 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
1459 vattr.va_nlink > 1)) {
1460 /*
1461 * Purge the name cache so that the chance of a lookup for
1462 * the name succeeding while the remove is in progress is
1463 * minimized. Without node locking it can still happen, such
1464 * that an I/O op returns ESTALE, but since you get this if
1465 * another host removes the file..
1466 */
1467 cache_purge(vp);
1468 /*
1469 * throw away biocache buffers, mainly to avoid
1470 * unnecessary delayed writes later.
1471 */
1472 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
1473 /* Do the rpc */
1474 if (error != EINTR)
1475 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1476 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
1477 /*
 1478 * Kludge City: If the first reply to the remove rpc is lost,
 1479 * the reply to the retransmitted request will be ENOENT
 1480 * since the file was in fact removed.
 1481 * Therefore, we cheat and return success.
1482 */
1483 if (error == ENOENT)
1484 error = 0;
1485 } else if (!np->n_sillyrename)
1486 error = nfs_sillyrename(dvp, vp, cnp);
1487 zfree(namei_zone, cnp->cn_pnbuf);
1488 np->n_attrstamp = 0;
1489 return (error);
1490}
1491
1492/*
1493 * nfs file remove rpc called from nfs_inactive
1494 */
1495int
1496nfs_removeit(sp)
1497 register struct sillyrename *sp;
1498{
1499
1500 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1501 (struct proc *)0));
1502}
1503
1504/*
1505 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1506 */
1507static int
1508nfs_removerpc(dvp, name, namelen, cred, proc)
1509 register struct vnode *dvp;
1510 const char *name;
1511 int namelen;
1512 struct ucred *cred;
1513 struct proc *proc;
1514{
1515 register u_int32_t *tl;
1516 register caddr_t cp;
1517 register int32_t t1, t2;
1518 caddr_t bpos, dpos, cp2;
1519 int error = 0, wccflag = NFSV3_WCCRATTR;
1520 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1521 int v3 = NFS_ISV3(dvp);
1522
1523 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1524 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1525 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1526 nfsm_fhtom(dvp, v3);
1527 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1528 nfsm_request(dvp, NFSPROC_REMOVE, proc, cred);
1529 if (v3)
1530 nfsm_wcc_data(dvp, wccflag);
1531 nfsm_reqdone;
1532 VTONFS(dvp)->n_flag |= NMODIFIED;
1533 if (!wccflag)
1534 VTONFS(dvp)->n_attrstamp = 0;
1535 return (error);
1536}
1537
1538/*
1539 * nfs file rename call
1540 */
1541static int
1542nfs_rename(ap)
1543 struct vop_rename_args /* {
1544 struct vnode *a_fdvp;
1545 struct vnode *a_fvp;
1546 struct componentname *a_fcnp;
1547 struct vnode *a_tdvp;
1548 struct vnode *a_tvp;
1549 struct componentname *a_tcnp;
1550 } */ *ap;
1551{
1552 register struct vnode *fvp = ap->a_fvp;
1553 register struct vnode *tvp = ap->a_tvp;
1554 register struct vnode *fdvp = ap->a_fdvp;
1555 register struct vnode *tdvp = ap->a_tdvp;
1556 register struct componentname *tcnp = ap->a_tcnp;
1557 register struct componentname *fcnp = ap->a_fcnp;
1558 int error;
1559
1560#ifndef DIAGNOSTIC
1561 if ((tcnp->cn_flags & HASBUF) == 0 ||
1562 (fcnp->cn_flags & HASBUF) == 0)
1563 panic("nfs_rename: no name");
1564#endif
1565 /* Check for cross-device rename */
1566 if ((fvp->v_mount != tdvp->v_mount) ||
1567 (tvp && (fvp->v_mount != tvp->v_mount))) {
1568 error = EXDEV;
1569 goto out;
1570 }
1571
1572 /*
1573 * We have to flush B_DELWRI data prior to renaming
1574 * the file. If we don't, the delayed-write buffers
1575 * can be flushed out later after the file has gone stale
1576 * under NFSV3. NFSV2 does not have this problem because
1577 * ( as far as I can tell ) it flushes dirty buffers more
1578 * often.
1579 */
1580
1581 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_proc);
1582 if (tvp)
1583 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_proc);
1584
1585 /*
1586 * If the tvp exists and is in use, sillyrename it before doing the
1587 * rename of the new file over it.
1588 * XXX Can't sillyrename a directory.
1589 */
1590 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1591 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1592 vput(tvp);
1593 tvp = NULL;
1594 }
1595
1596 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1597 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1598 tcnp->cn_proc);
1599
1600 if (fvp->v_type == VDIR) {
1601 if (tvp != NULL && tvp->v_type == VDIR)
1602 cache_purge(tdvp);
1603 cache_purge(fdvp);
1604 }
1605
1606out:
1607 VOP_ABORTOP(tdvp, tcnp);
1608 if (tdvp == tvp)
1609 vrele(tdvp);
1610 else
1611 vput(tdvp);
1612 if (tvp)
1613 vput(tvp);
1614 VOP_ABORTOP(fdvp, fcnp);
1615 vrele(fdvp);
1616 vrele(fvp);
1617 /*
1618 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1619 */
1620 if (error == ENOENT)
1621 error = 0;
1622 return (error);
1623}
1624
1625/*
1626 * nfs file rename rpc called from nfs_remove() above
1627 */
1628static int
1629nfs_renameit(sdvp, scnp, sp)
1630 struct vnode *sdvp;
1631 struct componentname *scnp;
1632 register struct sillyrename *sp;
1633{
1634 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1635 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc));
1636}
1637
1638/*
1639 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1640 */
1641static int
1642nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc)
1643 register struct vnode *fdvp;
1644 const char *fnameptr;
1645 int fnamelen;
1646 register struct vnode *tdvp;
1647 const char *tnameptr;
1648 int tnamelen;
1649 struct ucred *cred;
1650 struct proc *proc;
1651{
1652 register u_int32_t *tl;
1653 register caddr_t cp;
1654 register int32_t t1, t2;
1655 caddr_t bpos, dpos, cp2;
1656 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1657 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1658 int v3 = NFS_ISV3(fdvp);
1659
1660 nfsstats.rpccnt[NFSPROC_RENAME]++;
1661 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1662 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1663 nfsm_rndup(tnamelen));
1664 nfsm_fhtom(fdvp, v3);
1665 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1666 nfsm_fhtom(tdvp, v3);
1667 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1668 nfsm_request(fdvp, NFSPROC_RENAME, proc, cred);
1669 if (v3) {
1670 nfsm_wcc_data(fdvp, fwccflag);
1671 nfsm_wcc_data(tdvp, twccflag);
1672 }
1673 nfsm_reqdone;
1674 VTONFS(fdvp)->n_flag |= NMODIFIED;
1675 VTONFS(tdvp)->n_flag |= NMODIFIED;
1676 if (!fwccflag)
1677 VTONFS(fdvp)->n_attrstamp = 0;
1678 if (!twccflag)
1679 VTONFS(tdvp)->n_attrstamp = 0;
1680 return (error);
1681}
1682
1683/*
1684 * nfs hard link create call
1685 */
1686static int
1687nfs_link(ap)
1688 struct vop_link_args /* {
1689 struct vnode *a_tdvp;
1690 struct vnode *a_vp;
1691 struct componentname *a_cnp;
1692 } */ *ap;
1693{
1694 register struct vnode *vp = ap->a_vp;
1695 register struct vnode *tdvp = ap->a_tdvp;
1696 register struct componentname *cnp = ap->a_cnp;
1697 register u_int32_t *tl;
1698 register caddr_t cp;
1699 register int32_t t1, t2;
1700 caddr_t bpos, dpos, cp2;
1701 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1702 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1703 int v3;
1704
1705 if (vp->v_mount != tdvp->v_mount) {
1706 VOP_ABORTOP(tdvp, cnp);
1707 return (EXDEV);
1708 }
1709
1710 /*
1711 * Push all writes to the server, so that the attribute cache
1712 * doesn't get "out of sync" with the server.
1713 * XXX There should be a better way!
1714 */
1715 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);
1716
1717 v3 = NFS_ISV3(vp);
1718 nfsstats.rpccnt[NFSPROC_LINK]++;
1719 nfsm_reqhead(vp, NFSPROC_LINK,
1720 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1721 nfsm_fhtom(vp, v3);
1722 nfsm_fhtom(tdvp, v3);
1723 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1724 nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
1725 if (v3) {
1726 nfsm_postop_attr(vp, attrflag);
1727 nfsm_wcc_data(tdvp, wccflag);
1728 }
1729 nfsm_reqdone;
1730 zfree(namei_zone, cnp->cn_pnbuf);
1731 VTONFS(tdvp)->n_flag |= NMODIFIED;
1732 if (!attrflag)
1733 VTONFS(vp)->n_attrstamp = 0;
1734 if (!wccflag)
1735 VTONFS(tdvp)->n_attrstamp = 0;
1736 /*
1737 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1738 */
1739 if (error == EEXIST)
1740 error = 0;
1741 return (error);
1742}
1743
1744/*
1745 * nfs symbolic link create call
1746 */
1747static int
1748nfs_symlink(ap)
1749 struct vop_symlink_args /* {
1750 struct vnode *a_dvp;
1751 struct vnode **a_vpp;
1752 struct componentname *a_cnp;
1753 struct vattr *a_vap;
1754 char *a_target;
1755 } */ *ap;
1756{
1757 register struct vnode *dvp = ap->a_dvp;
1758 register struct vattr *vap = ap->a_vap;
1759 register struct componentname *cnp = ap->a_cnp;
1760 register struct nfsv2_sattr *sp;
1761 register u_int32_t *tl;
1762 register caddr_t cp;
1763 register int32_t t1, t2;
1764 caddr_t bpos, dpos, cp2;
1765 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1766 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1767 struct vnode *newvp = (struct vnode *)0;
1768 int v3 = NFS_ISV3(dvp);
1769
1770 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1771 slen = strlen(ap->a_target);
1772 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1773 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1774 nfsm_fhtom(dvp, v3);
1775 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1776 if (v3) {
1777 nfsm_v3attrbuild(vap, FALSE);
1778 }
1779 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1780 if (!v3) {
1781 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1782 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1783 sp->sa_uid = nfs_xdrneg1;
1784 sp->sa_gid = nfs_xdrneg1;
1785 sp->sa_size = nfs_xdrneg1;
1786 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1787 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1788 }
1789 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred);
1790 if (v3) {
1791 if (!error)
1792 nfsm_mtofh(dvp, newvp, v3, gotvp);
1793 nfsm_wcc_data(dvp, wccflag);
1794 }
1795 nfsm_reqdone;
1796 if (newvp)
1797 vput(newvp);
1798 VTONFS(dvp)->n_flag |= NMODIFIED;
1799 if (!wccflag)
1800 VTONFS(dvp)->n_attrstamp = 0;
1801 /*
1802 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1803 */
1804 if (error == EEXIST)
1805 error = 0;
1806 /*
1807	 * cnp's buffer is expected to be freed if SAVESTART is not set
1808	 * or if an error was returned.
1809 */
1810 if (error || (cnp->cn_flags & SAVESTART) == 0)
1811 zfree(namei_zone, cnp->cn_pnbuf);
1812 return (error);
1813}
1814
1815/*
1816 * nfs make dir call
1817 */
1818static int
1819nfs_mkdir(ap)
1820 struct vop_mkdir_args /* {
1821 struct vnode *a_dvp;
1822 struct vnode **a_vpp;
1823 struct componentname *a_cnp;
1824 struct vattr *a_vap;
1825 } */ *ap;
1826{
1827 register struct vnode *dvp = ap->a_dvp;
1828 register struct vattr *vap = ap->a_vap;
1829 register struct componentname *cnp = ap->a_cnp;
1830 register struct nfsv2_sattr *sp;
1831 register u_int32_t *tl;
1832 register caddr_t cp;
1833 register int32_t t1, t2;
1834 register int len;
1835 struct nfsnode *np = (struct nfsnode *)0;
1836 struct vnode *newvp = (struct vnode *)0;
1837 caddr_t bpos, dpos, cp2;
1838 int error = 0, wccflag = NFSV3_WCCRATTR;
1839 int gotvp = 0;
1840 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1841 struct vattr vattr;
1842 int v3 = NFS_ISV3(dvp);
1843
1844 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1845 VOP_ABORTOP(dvp, cnp);
1846 return (error);
1847 }
1848 len = cnp->cn_namelen;
1849 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1850 nfsm_reqhead(dvp, NFSPROC_MKDIR,
1851 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1852 nfsm_fhtom(dvp, v3);
1853 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1854 if (v3) {
1855 nfsm_v3attrbuild(vap, FALSE);
1856 } else {
1857 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1858 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1859 sp->sa_uid = nfs_xdrneg1;
1860 sp->sa_gid = nfs_xdrneg1;
1861 sp->sa_size = nfs_xdrneg1;
1862 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1863 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1864 }
1865 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
1866 if (!error)
1867 nfsm_mtofh(dvp, newvp, v3, gotvp);
1868 if (v3)
1869 nfsm_wcc_data(dvp, wccflag);
1870 nfsm_reqdone;
1871 VTONFS(dvp)->n_flag |= NMODIFIED;
1872 if (!wccflag)
1873 VTONFS(dvp)->n_attrstamp = 0;
1874 /*
1875	 * Kludge: Map EEXIST => 0, assuming that it is a reply to a retry,
1876	 * if we can succeed in looking up the directory.
1877 */
1878 if (error == EEXIST || (!error && !gotvp)) {
1879 if (newvp) {
1880 vrele(newvp);
1881 newvp = (struct vnode *)0;
1882 }
1883 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1884 cnp->cn_proc, &np);
1885 if (!error) {
1886 newvp = NFSTOV(np);
1887 if (newvp->v_type != VDIR)
1888 error = EEXIST;
1889 }
1890 }
1891 if (error) {
1892 if (newvp)
1893 vrele(newvp);
1894 } else
1895 *ap->a_vpp = newvp;
1896 if (error || (cnp->cn_flags & SAVESTART) == 0)
1897 zfree(namei_zone, cnp->cn_pnbuf);
1898 return (error);
1899}
1900
1901/*
1902 * nfs remove directory call
1903 */
1904static int
1905nfs_rmdir(ap)
1906 struct vop_rmdir_args /* {
1907 struct vnode *a_dvp;
1908 struct vnode *a_vp;
1909 struct componentname *a_cnp;
1910 } */ *ap;
1911{
1912 register struct vnode *vp = ap->a_vp;
1913 register struct vnode *dvp = ap->a_dvp;
1914 register struct componentname *cnp = ap->a_cnp;
1915 register u_int32_t *tl;
1916 register caddr_t cp;
1917 register int32_t t1, t2;
1918 caddr_t bpos, dpos, cp2;
1919 int error = 0, wccflag = NFSV3_WCCRATTR;
1920 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1921 int v3 = NFS_ISV3(dvp);
1922
1923 if (dvp == vp)
1924 return (EINVAL);
1925 nfsstats.rpccnt[NFSPROC_RMDIR]++;
1926 nfsm_reqhead(dvp, NFSPROC_RMDIR,
1927 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1928 nfsm_fhtom(dvp, v3);
1929 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1930 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
1931 if (v3)
1932 nfsm_wcc_data(dvp, wccflag);
1933 nfsm_reqdone;
1934 zfree(namei_zone, cnp->cn_pnbuf);
1935 VTONFS(dvp)->n_flag |= NMODIFIED;
1936 if (!wccflag)
1937 VTONFS(dvp)->n_attrstamp = 0;
1938 cache_purge(dvp);
1939 cache_purge(vp);
1940 /*
1941	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1942 */
1943 if (error == ENOENT)
1944 error = 0;
1945 return (error);
1946}
1947
1948/*
1949 * nfs readdir call
1950 */
1951static int
1952nfs_readdir(ap)
1953 struct vop_readdir_args /* {
1954 struct vnode *a_vp;
1955 struct uio *a_uio;
1956 struct ucred *a_cred;
1957 } */ *ap;
1958{
1959 register struct vnode *vp = ap->a_vp;
1960 register struct nfsnode *np = VTONFS(vp);
1961 register struct uio *uio = ap->a_uio;
1962 int tresid, error;
1963 struct vattr vattr;
1964
1965 if (vp->v_type != VDIR)
1966 return (EPERM);
1967 /*
1968 * First, check for hit on the EOF offset cache
1969 */
1970 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
1971 (np->n_flag & NMODIFIED) == 0) {
1972 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
1973 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
1974 nfsstats.direofcache_hits++;
1975 return (0);
1976 }
1977 } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
1978 np->n_mtime == vattr.va_mtime.tv_sec) {
1979 nfsstats.direofcache_hits++;
1980 return (0);
1981 }
1982 }
1983
1984 /*
1985 * Call nfs_bioread() to do the real work.
1986 */
1987 tresid = uio->uio_resid;
1988 error = nfs_bioread(vp, uio, 0, ap->a_cred);
1989
1990 if (!error && uio->uio_resid == tresid)
1991 nfsstats.direofcache_misses++;
1992 return (error);
1993}
1994
1995/*
1996 * Readdir rpc call.
1997 * Called from below the buffer cache by nfs_doio().
1998 */
1999int
2000nfs_readdirrpc(vp, uiop, cred)
2001 struct vnode *vp;
2002 register struct uio *uiop;
2003 struct ucred *cred;
2004
2005{
2006 register int len, left;
2007 register struct dirent *dp = NULL;
2008 register u_int32_t *tl;
2009 register caddr_t cp;
2010 register int32_t t1, t2;
2011 register nfsuint64 *cookiep;
2012 caddr_t bpos, dpos, cp2;
2013 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2014 nfsuint64 cookie;
2015 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2016 struct nfsnode *dnp = VTONFS(vp);
2017 u_quad_t fileno;
2018 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2019 int attrflag;
2020 int v3 = NFS_ISV3(vp);
2021
2022#ifndef DIAGNOSTIC
2023 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2024 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2025 panic("nfs readdirrpc bad uio");
2026#endif
2027
2028 /*
2029 * If there is no cookie, assume directory was stale.
2030 */
2031 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2032 if (cookiep)
2033 cookie = *cookiep;
2034 else
2035 return (NFSERR_BAD_COOKIE);
2036 /*
2037 * Loop around doing readdir rpc's of size nm_readdirsize
2038 * truncated to a multiple of DIRBLKSIZ.
2039	 * The stopping criterion is EOF or a full buffer.
2040 */
2041 while (more_dirs && bigenough) {
2042 nfsstats.rpccnt[NFSPROC_READDIR]++;
2043 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2044 NFSX_READDIR(v3));
2045 nfsm_fhtom(vp, v3);
2046 if (v3) {
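			/*
			 * V3: send the 64 bit directory cookie along with the
			 * cookie verifier that the server returned in the
			 * previous READDIR reply.
			 */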
2047 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2048 *tl++ = cookie.nfsuquad[0];
2049 *tl++ = cookie.nfsuquad[1];
2050 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2051 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2052 } else {
2053 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2054 *tl++ = cookie.nfsuquad[0];
2055 }
2056 *tl = txdr_unsigned(nmp->nm_readdirsize);
2057 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
2058 if (v3) {
2059 nfsm_postop_attr(vp, attrflag);
2060 if (!error) {
2061 nfsm_dissect(tl, u_int32_t *,
2062 2 * NFSX_UNSIGNED);
2063 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2064 dnp->n_cookieverf.nfsuquad[1] = *tl;
2065 } else {
2066 m_freem(mrep);
2067 goto nfsmout;
2068 }
2069 }
2070 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2071 more_dirs = fxdr_unsigned(int, *tl);
2072
2073 /* loop thru the dir entries, doctoring them to 4bsd form */
2074 while (more_dirs && bigenough) {
2075 if (v3) {
2076 nfsm_dissect(tl, u_int32_t *,
2077 3 * NFSX_UNSIGNED);
2078 fxdr_hyper(tl, &fileno);
2079 len = fxdr_unsigned(int, *(tl + 2));
2080 } else {
2081 nfsm_dissect(tl, u_int32_t *,
2082 2 * NFSX_UNSIGNED);
2083 fileno = fxdr_unsigned(u_quad_t, *tl++);
2084 len = fxdr_unsigned(int, *tl);
2085 }
2086 if (len <= 0 || len > NFS_MAXNAMLEN) {
2087 error = EBADRPC;
2088 m_freem(mrep);
2089 goto nfsmout;
2090 }
2091 tlen = nfsm_rndup(len);
2092 if (tlen == len)
2093 tlen += 4; /* To ensure null termination */
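			/*
			 * Entries are packed into DIRBLKSIZ-aligned logical
			 * blocks; if this entry will not fit in what is left
			 * of the current block, pad the previous entry out to
			 * the block boundary by growing its d_reclen.
			 */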
2094 left = DIRBLKSIZ - blksiz;
2095 if ((tlen + DIRHDSIZ) > left) {
2096 dp->d_reclen += left;
2097 uiop->uio_iov->iov_base += left;
2098 uiop->uio_iov->iov_len -= left;
2099 uiop->uio_offset += left;
2100 uiop->uio_resid -= left;
2101 blksiz = 0;
2102 }
2103 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2104 bigenough = 0;
2105 if (bigenough) {
2106 dp = (struct dirent *)uiop->uio_iov->iov_base;
2107 dp->d_fileno = (int)fileno;
2108 dp->d_namlen = len;
2109 dp->d_reclen = tlen + DIRHDSIZ;
2110 dp->d_type = DT_UNKNOWN;
2111 blksiz += dp->d_reclen;
2112 if (blksiz == DIRBLKSIZ)
2113 blksiz = 0;
2114 uiop->uio_offset += DIRHDSIZ;
2115 uiop->uio_resid -= DIRHDSIZ;
2116 uiop->uio_iov->iov_base += DIRHDSIZ;
2117 uiop->uio_iov->iov_len -= DIRHDSIZ;
2118 nfsm_mtouio(uiop, len);
2119 cp = uiop->uio_iov->iov_base;
2120 tlen -= len;
2121 *cp = '\0'; /* null terminate */
2122 uiop->uio_iov->iov_base += tlen;
2123 uiop->uio_iov->iov_len -= tlen;
2124 uiop->uio_offset += tlen;
2125 uiop->uio_resid -= tlen;
2126 } else
2127 nfsm_adv(nfsm_rndup(len));
2128 if (v3) {
2129 nfsm_dissect(tl, u_int32_t *,
2130 3 * NFSX_UNSIGNED);
2131 } else {
2132 nfsm_dissect(tl, u_int32_t *,
2133 2 * NFSX_UNSIGNED);
2134 }
2135 if (bigenough) {
2136 cookie.nfsuquad[0] = *tl++;
2137 if (v3)
2138 cookie.nfsuquad[1] = *tl++;
2139 } else if (v3)
2140 tl += 2;
2141 else
2142 tl++;
2143 more_dirs = fxdr_unsigned(int, *tl);
2144 }
2145 /*
2146 * If at end of rpc data, get the eof boolean
2147 */
2148 if (!more_dirs) {
2149 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2150 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2151 }
2152 m_freem(mrep);
2153 }
2154 /*
2155	 * Fill the last record, if any, out to a multiple of DIRBLKSIZ
2156 * by increasing d_reclen for the last record.
2157 */
2158 if (blksiz > 0) {
2159 left = DIRBLKSIZ - blksiz;
2160 dp->d_reclen += left;
2161 uiop->uio_iov->iov_base += left;
2162 uiop->uio_iov->iov_len -= left;
2163 uiop->uio_offset += left;
2164 uiop->uio_resid -= left;
2165 }
2166
2167 /*
2168 * We are now either at the end of the directory or have filled the
2169 * block.
2170 */
2171 if (bigenough)
2172 dnp->n_direofoffset = uiop->uio_offset;
2173 else {
2174 if (uiop->uio_resid > 0)
2175 printf("EEK! readdirrpc resid > 0\n");
2176 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2177 *cookiep = cookie;
2178 }
2179nfsmout:
2180 return (error);
2181}
2182
2183/*
2184 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2185 */
2186int
2187nfs_readdirplusrpc(vp, uiop, cred)
2188 struct vnode *vp;
2189 register struct uio *uiop;
2190 struct ucred *cred;
2191{
2192 register int len, left;
2193 register struct dirent *dp;
2194 register u_int32_t *tl;
2195 register caddr_t cp;
2196 register int32_t t1, t2;
2197 register struct vnode *newvp;
2198 register nfsuint64 *cookiep;
2199 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2200 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2201 struct nameidata nami, *ndp = &nami;
2202 struct componentname *cnp = &ndp->ni_cnd;
2203 nfsuint64 cookie;
2204 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2205 struct nfsnode *dnp = VTONFS(vp), *np;
2206 nfsfh_t *fhp;
2207 u_quad_t fileno;
2208 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2209 int attrflag, fhsize;
2210
2211#ifndef nolint
2212 dp = (struct dirent *)0;
2213#endif
2214#ifndef DIAGNOSTIC
2215 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2216 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2217 panic("nfs readdirplusrpc bad uio");
2218#endif
2219 ndp->ni_dvp = vp;
2220 newvp = NULLVP;
2221
2222 /*
2223 * If there is no cookie, assume directory was stale.
2224 */
2225 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2226 if (cookiep)
2227 cookie = *cookiep;
2228 else
2229 return (NFSERR_BAD_COOKIE);
2230 /*
2231 * Loop around doing readdir rpc's of size nm_readdirsize
2232 * truncated to a multiple of DIRBLKSIZ.
2233	 * The stopping criterion is EOF or a full buffer.
2234 */
2235 while (more_dirs && bigenough) {
2236 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2237 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2238 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2239 nfsm_fhtom(vp, 1);
2240 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2241 *tl++ = cookie.nfsuquad[0];
2242 *tl++ = cookie.nfsuquad[1];
2243 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2244 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2245 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2246 *tl = txdr_unsigned(nmp->nm_rsize);
2247 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
2248 nfsm_postop_attr(vp, attrflag);
2249 if (error) {
2250 m_freem(mrep);
2251 goto nfsmout;
2252 }
2253 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2254 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2255 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2256 more_dirs = fxdr_unsigned(int, *tl);
2257
2258 /* loop thru the dir entries, doctoring them to 4bsd form */
2259 while (more_dirs && bigenough) {
2260 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2261 fxdr_hyper(tl, &fileno);
2262 len = fxdr_unsigned(int, *(tl + 2));
2263 if (len <= 0 || len > NFS_MAXNAMLEN) {
2264 error = EBADRPC;
2265 m_freem(mrep);
2266 goto nfsmout;
2267 }
2268 tlen = nfsm_rndup(len);
2269 if (tlen == len)
2270				tlen += 4;	/* To ensure null termination */
2271 left = DIRBLKSIZ - blksiz;
2272 if ((tlen + DIRHDSIZ) > left) {
2273 dp->d_reclen += left;
2274 uiop->uio_iov->iov_base += left;
2275 uiop->uio_iov->iov_len -= left;
2276 uiop->uio_offset += left;
2277 uiop->uio_resid -= left;
2278 blksiz = 0;
2279 }
2280 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2281 bigenough = 0;
2282 if (bigenough) {
2283 dp = (struct dirent *)uiop->uio_iov->iov_base;
2284 dp->d_fileno = (int)fileno;
2285 dp->d_namlen = len;
2286 dp->d_reclen = tlen + DIRHDSIZ;
2287 dp->d_type = DT_UNKNOWN;
2288 blksiz += dp->d_reclen;
2289 if (blksiz == DIRBLKSIZ)
2290 blksiz = 0;
2291 uiop->uio_offset += DIRHDSIZ;
2292 uiop->uio_resid -= DIRHDSIZ;
2293 uiop->uio_iov->iov_base += DIRHDSIZ;
2294 uiop->uio_iov->iov_len -= DIRHDSIZ;
2295 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2296 cnp->cn_namelen = len;
2297 nfsm_mtouio(uiop, len);
2298 cp = uiop->uio_iov->iov_base;
2299 tlen -= len;
2300 *cp = '\0';
2301 uiop->uio_iov->iov_base += tlen;
2302 uiop->uio_iov->iov_len -= tlen;
2303 uiop->uio_offset += tlen;
2304 uiop->uio_resid -= tlen;
2305 } else
2306 nfsm_adv(nfsm_rndup(len));
2307 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2308 if (bigenough) {
2309 cookie.nfsuquad[0] = *tl++;
2310 cookie.nfsuquad[1] = *tl++;
2311 } else
2312 tl += 2;
2313
2314 /*
2315 * Since the attributes are before the file handle
2316 * (sigh), we must skip over the attributes and then
2317 * come back and get them.
2318 */
2319 attrflag = fxdr_unsigned(int, *tl);
2320 if (attrflag) {
2321 dpossav1 = dpos;
2322 mdsav1 = md;
2323 nfsm_adv(NFSX_V3FATTR);
2324 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2325 doit = fxdr_unsigned(int, *tl);
2326 if (doit) {
2327 nfsm_getfh(fhp, fhsize, 1);
2328 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2329 VREF(vp);
2330 newvp = vp;
2331 np = dnp;
2332 } else {
2333 error = nfs_nget(vp->v_mount, fhp,
2334 fhsize, &np);
2335 if (error)
2336 doit = 0;
2337 else
2338 newvp = NFSTOV(np);
2339 }
2340 }
2341 if (doit) {
2342 dpossav2 = dpos;
2343 dpos = dpossav1;
2344 mdsav2 = md;
2345 md = mdsav1;
2346 nfsm_loadattr(newvp, (struct vattr *)0);
2347 dpos = dpossav2;
2348 md = mdsav2;
2349 dp->d_type =
2350 IFTODT(VTTOIF(np->n_vattr.va_type));
2351 ndp->ni_vp = newvp;
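					/*
					 * Hash the name and prime the name
					 * cache with this entry, so a later
					 * lookup of it may be satisfied
					 * without another LOOKUP RPC.
					 */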
2352 cnp->cn_hash = 0;
2353 for (cp = cnp->cn_nameptr, i = 1; i <= len;
2354 i++, cp++)
2355 cnp->cn_hash += (unsigned char)*cp * i;
2356 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2357 }
2358 } else {
2359 /* Just skip over the file handle */
2360 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2361 i = fxdr_unsigned(int, *tl);
2362 nfsm_adv(nfsm_rndup(i));
2363 }
2364 if (newvp != NULLVP) {
2365 vrele(newvp);
2366 newvp = NULLVP;
2367 }
2368 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2369 more_dirs = fxdr_unsigned(int, *tl);
2370 }
2371 /*
2372 * If at end of rpc data, get the eof boolean
2373 */
2374 if (!more_dirs) {
2375 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2376 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2377 }
2378 m_freem(mrep);
2379 }
2380 /*
2381	 * Fill the last record, if any, out to a multiple of DIRBLKSIZ
2382 * by increasing d_reclen for the last record.
2383 */
2384 if (blksiz > 0) {
2385 left = DIRBLKSIZ - blksiz;
2386 dp->d_reclen += left;
2387 uiop->uio_iov->iov_base += left;
2388 uiop->uio_iov->iov_len -= left;
2389 uiop->uio_offset += left;
2390 uiop->uio_resid -= left;
2391 }
2392
2393 /*
2394 * We are now either at the end of the directory or have filled the
2395 * block.
2396 */
2397 if (bigenough)
2398 dnp->n_direofoffset = uiop->uio_offset;
2399 else {
2400 if (uiop->uio_resid > 0)
2401 printf("EEK! readdirplusrpc resid > 0\n");
2402 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2403 *cookiep = cookie;
2404 }
2405nfsmout:
2406 if (newvp != NULLVP) {
2407 if (newvp == vp)
2408 vrele(newvp);
2409 else
2410 vput(newvp);
2411 newvp = NULLVP;
2412 }
2413 return (error);
2414}
2415
2416/*
2417	 * Silly rename. To make the stateless NFS filesystem look a little more
2418	 * like "ufs", a remove of an active vnode is translated into a rename to a
2419	 * funny looking filename that is removed by nfs_inactive on the nfsnode.
2420	 * There is the potential for another process on a different client to
2421	 * create the same funny name between the time nfs_lookitup() fails and
2422	 * nfs_rename() completes, but...
2423 */
2424static int
2425nfs_sillyrename(dvp, vp, cnp)
2426 struct vnode *dvp, *vp;
2427 struct componentname *cnp;
2428{
2429 register struct sillyrename *sp;
2430 struct nfsnode *np;
2431 int error;
2432 short pid;
2433
2434 cache_purge(dvp);
2435 np = VTONFS(vp);
2436#ifndef DIAGNOSTIC
2437 if (vp->v_type == VDIR)
2438 panic("nfs: sillyrename dir");
2439#endif
2440 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2441 M_NFSREQ, M_WAITOK);
2442 sp->s_cred = crdup(cnp->cn_cred);
2443 sp->s_dvp = dvp;
2444 VREF(dvp);
2445
2446 /* Fudge together a funny name */
2447 pid = cnp->cn_proc->p_pid;
2448 sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);
2449
2450 /* Try lookitups until we get one that isn't there */
2451 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2452 cnp->cn_proc, (struct nfsnode **)0) == 0) {
2453 sp->s_name[4]++;
2454 if (sp->s_name[4] > 'z') {
2455 error = EINVAL;
2456 goto bad;
2457 }
2458 }
2459 error = nfs_renameit(dvp, cnp, sp);
2460 if (error)
2461 goto bad;
2462 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2463 cnp->cn_proc, &np);
2464 np->n_sillyrename = sp;
2465 return (0);
2466bad:
2467 vrele(sp->s_dvp);
2468 crfree(sp->s_cred);
2469 free((caddr_t)sp, M_NFSREQ);
2470 return (error);
2471}
2472
2473/*
2474 * Look up a file name and optionally either update the file handle or
2475 * allocate an nfsnode, depending on the value of npp.
2476 * npp == NULL --> just do the lookup
2477 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2478 * handled too
2479 * *npp != NULL --> update the file handle in the vnode
2480 */
2481static int
2482nfs_lookitup(dvp, name, len, cred, procp, npp)
2483 register struct vnode *dvp;
2484 const char *name;
2485 int len;
2486 struct ucred *cred;
2487 struct proc *procp;
2488 struct nfsnode **npp;
2489{
2490 register u_int32_t *tl;
2491 register caddr_t cp;
2492 register int32_t t1, t2;
2493 struct vnode *newvp = (struct vnode *)0;
2494 struct nfsnode *np, *dnp = VTONFS(dvp);
2495 caddr_t bpos, dpos, cp2;
2496 int error = 0, fhlen, attrflag;
2497 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2498 nfsfh_t *nfhp;
2499 int v3 = NFS_ISV3(dvp);
2500
2501 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2502 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2503 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2504 nfsm_fhtom(dvp, v3);
2505 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2506 nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred);
2507 if (npp && !error) {
2508 nfsm_getfh(nfhp, fhlen, v3);
2509 if (*npp) {
2510 np = *npp;
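			/*
			 * Update the file handle in place, moving between the
			 * embedded small-handle buffer and a malloc'd buffer
			 * as the size of the new handle requires.
			 */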
2511 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2512 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2513 np->n_fhp = &np->n_fh;
2514 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2515 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2516 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2517 np->n_fhsize = fhlen;
2518 newvp = NFSTOV(np);
2519 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2520 VREF(dvp);
2521 newvp = dvp;
2522 } else {
2523 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2524 if (error) {
2525 m_freem(mrep);
2526 return (error);
2527 }
2528 newvp = NFSTOV(np);
2529 }
2530 if (v3) {
2531 nfsm_postop_attr(newvp, attrflag);
2532 if (!attrflag && *npp == NULL) {
2533 m_freem(mrep);
2534 if (newvp == dvp)
2535 vrele(newvp);
2536 else
2537 vput(newvp);
2538 return (ENOENT);
2539 }
2540 } else
2541 nfsm_loadattr(newvp, (struct vattr *)0);
2542 }
2543 nfsm_reqdone;
2544 if (npp && *npp == NULL) {
2545 if (error) {
38 */
39
40
41/*
42 * vnode op calls for Sun NFS version 2 and 3
43 */
44
45#include "opt_inet.h"
46
47#include <sys/param.h>
48#include <sys/kernel.h>
49#include <sys/systm.h>
50#include <sys/resourcevar.h>
51#include <sys/proc.h>
52#include <sys/mount.h>
53#include <sys/buf.h>
54#include <sys/malloc.h>
55#include <sys/mbuf.h>
56#include <sys/namei.h>
57#include <sys/socket.h>
58#include <sys/vnode.h>
59#include <sys/dirent.h>
60#include <sys/fcntl.h>
61#include <sys/lockf.h>
62#include <sys/stat.h>
63#include <sys/sysctl.h>
64
65#include <vm/vm.h>
66#include <vm/vm_extern.h>
67#include <vm/vm_zone.h>
68
69#include <miscfs/fifofs/fifo.h>
70#include <miscfs/specfs/specdev.h>
71
72#include <nfs/rpcv2.h>
73#include <nfs/nfsproto.h>
74#include <nfs/nfs.h>
75#include <nfs/nfsnode.h>
76#include <nfs/nfsmount.h>
77#include <nfs/xdr_subs.h>
78#include <nfs/nfsm_subs.h>
79#include <nfs/nqnfs.h>
80
81#include <net/if.h>
82#include <netinet/in.h>
83#include <netinet/in_var.h>
84
85/* Defs */
86#define TRUE 1
87#define FALSE 0
88
89/*
90 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
91 * calls are not in getblk() and brelse() so that they would not be necessary
92 * here.
93 */
94#ifndef B_VMIO
95#define vfs_busy_pages(bp, f)
96#endif
97
98static int nfsspec_read __P((struct vop_read_args *));
99static int nfsspec_write __P((struct vop_write_args *));
100static int nfsfifo_read __P((struct vop_read_args *));
101static int nfsfifo_write __P((struct vop_write_args *));
102static int nfsspec_close __P((struct vop_close_args *));
103static int nfsfifo_close __P((struct vop_close_args *));
104#define nfs_poll vop_nopoll
105static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int));
106static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *));
107static int nfs_lookup __P((struct vop_lookup_args *));
108static int nfs_create __P((struct vop_create_args *));
109static int nfs_mknod __P((struct vop_mknod_args *));
110static int nfs_open __P((struct vop_open_args *));
111static int nfs_close __P((struct vop_close_args *));
112static int nfs_access __P((struct vop_access_args *));
113static int nfs_getattr __P((struct vop_getattr_args *));
114static int nfs_setattr __P((struct vop_setattr_args *));
115static int nfs_read __P((struct vop_read_args *));
116static int nfs_mmap __P((struct vop_mmap_args *));
117static int nfs_fsync __P((struct vop_fsync_args *));
118static int nfs_remove __P((struct vop_remove_args *));
119static int nfs_link __P((struct vop_link_args *));
120static int nfs_rename __P((struct vop_rename_args *));
121static int nfs_mkdir __P((struct vop_mkdir_args *));
122static int nfs_rmdir __P((struct vop_rmdir_args *));
123static int nfs_symlink __P((struct vop_symlink_args *));
124static int nfs_readdir __P((struct vop_readdir_args *));
125static int nfs_bmap __P((struct vop_bmap_args *));
126static int nfs_strategy __P((struct vop_strategy_args *));
127static int nfs_lookitup __P((struct vnode *, const char *, int,
128 struct ucred *, struct proc *, struct nfsnode **));
129static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *));
130static int nfsspec_access __P((struct vop_access_args *));
131static int nfs_readlink __P((struct vop_readlink_args *));
132static int nfs_print __P((struct vop_print_args *));
133static int nfs_advlock __P((struct vop_advlock_args *));
134static int nfs_bwrite __P((struct vop_bwrite_args *));
135/*
136 * Global vfs data structures for nfs
137 */
138vop_t **nfsv2_vnodeop_p;
139static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
140 { &vop_default_desc, (vop_t *) vop_defaultop },
141 { &vop_abortop_desc, (vop_t *) nfs_abortop },
142 { &vop_access_desc, (vop_t *) nfs_access },
143 { &vop_advlock_desc, (vop_t *) nfs_advlock },
144 { &vop_bmap_desc, (vop_t *) nfs_bmap },
145 { &vop_bwrite_desc, (vop_t *) nfs_bwrite },
146 { &vop_close_desc, (vop_t *) nfs_close },
147 { &vop_create_desc, (vop_t *) nfs_create },
148 { &vop_fsync_desc, (vop_t *) nfs_fsync },
149 { &vop_getattr_desc, (vop_t *) nfs_getattr },
150 { &vop_getpages_desc, (vop_t *) nfs_getpages },
151 { &vop_putpages_desc, (vop_t *) nfs_putpages },
152 { &vop_inactive_desc, (vop_t *) nfs_inactive },
153 { &vop_lease_desc, (vop_t *) vop_null },
154 { &vop_link_desc, (vop_t *) nfs_link },
155 { &vop_lock_desc, (vop_t *) vop_sharedlock },
156 { &vop_lookup_desc, (vop_t *) nfs_lookup },
157 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
158 { &vop_mknod_desc, (vop_t *) nfs_mknod },
159 { &vop_mmap_desc, (vop_t *) nfs_mmap },
160 { &vop_open_desc, (vop_t *) nfs_open },
161 { &vop_poll_desc, (vop_t *) nfs_poll },
162 { &vop_print_desc, (vop_t *) nfs_print },
163 { &vop_read_desc, (vop_t *) nfs_read },
164 { &vop_readdir_desc, (vop_t *) nfs_readdir },
165 { &vop_readlink_desc, (vop_t *) nfs_readlink },
166 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
167 { &vop_remove_desc, (vop_t *) nfs_remove },
168 { &vop_rename_desc, (vop_t *) nfs_rename },
169 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
170 { &vop_setattr_desc, (vop_t *) nfs_setattr },
171 { &vop_strategy_desc, (vop_t *) nfs_strategy },
172 { &vop_symlink_desc, (vop_t *) nfs_symlink },
173 { &vop_write_desc, (vop_t *) nfs_write },
174 { NULL, NULL }
175};
176static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
177 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
178VNODEOP_SET(nfsv2_vnodeop_opv_desc);
179
180/*
181 * Special device vnode ops
182 */
183vop_t **spec_nfsv2nodeop_p;
184static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
185 { &vop_default_desc, (vop_t *) spec_vnoperate },
186 { &vop_access_desc, (vop_t *) nfsspec_access },
187 { &vop_close_desc, (vop_t *) nfsspec_close },
188 { &vop_fsync_desc, (vop_t *) nfs_fsync },
189 { &vop_getattr_desc, (vop_t *) nfs_getattr },
190 { &vop_inactive_desc, (vop_t *) nfs_inactive },
191 { &vop_lock_desc, (vop_t *) vop_sharedlock },
192 { &vop_print_desc, (vop_t *) nfs_print },
193 { &vop_read_desc, (vop_t *) nfsspec_read },
194 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
195 { &vop_setattr_desc, (vop_t *) nfs_setattr },
196 { &vop_write_desc, (vop_t *) nfsspec_write },
197 { NULL, NULL }
198};
199static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
200 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
201VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
202
203vop_t **fifo_nfsv2nodeop_p;
204static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
205 { &vop_default_desc, (vop_t *) fifo_vnoperate },
206 { &vop_access_desc, (vop_t *) nfsspec_access },
207 { &vop_close_desc, (vop_t *) nfsfifo_close },
208 { &vop_fsync_desc, (vop_t *) nfs_fsync },
209 { &vop_getattr_desc, (vop_t *) nfs_getattr },
210 { &vop_inactive_desc, (vop_t *) nfs_inactive },
211 { &vop_lock_desc, (vop_t *) vop_sharedlock },
212 { &vop_print_desc, (vop_t *) nfs_print },
213 { &vop_read_desc, (vop_t *) nfsfifo_read },
214 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
215 { &vop_setattr_desc, (vop_t *) nfs_setattr },
216 { &vop_write_desc, (vop_t *) nfsfifo_write },
217 { NULL, NULL }
218};
219static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
220 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
221VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
222
223static int nfs_commit __P((struct vnode *vp, u_quad_t offset, int cnt,
224 struct ucred *cred, struct proc *procp));
225static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp,
226 struct componentname *cnp,
227 struct vattr *vap));
228static int nfs_removerpc __P((struct vnode *dvp, const char *name,
229 int namelen,
230 struct ucred *cred, struct proc *proc));
231static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr,
232 int fnamelen, struct vnode *tdvp,
233 const char *tnameptr, int tnamelen,
234 struct ucred *cred, struct proc *proc));
235static int nfs_renameit __P((struct vnode *sdvp,
236 struct componentname *scnp,
237 struct sillyrename *sp));
238
239/*
240 * Global variables
241 */
242extern u_int32_t nfs_true, nfs_false;
243extern u_int32_t nfs_xdrneg1;
244extern struct nfsstats nfsstats;
245extern nfstype nfsv3_type[9];
246struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
247struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
248int nfs_numasync = 0;
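/* Fixed-size portion of a struct dirent, excluding the name buffer. */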
249#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
250
251SYSCTL_DECL(_vfs_nfs);
252
253static int nfsaccess_cache_timeout = 2;
254SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
255 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
256
257static int nfsaccess_cache_hits;
258SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
259 &nfsaccess_cache_hits, 0, "NFS ACCESS cache hit count");
260
261static int nfsaccess_cache_fills;
262SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_fills, CTLFLAG_RD,
263 &nfsaccess_cache_fills, 0, "NFS ACCESS cache fill count");
264
265/*
266 * nfs access vnode op.
267 * For nfs version 2, just return ok. File accesses may fail later.
268 * For nfs version 3, use the access rpc to check accessibility. If file modes
269 * are changed on the server, accesses might still fail later.
270 */
271static int
272nfs_access(ap)
273 struct vop_access_args /* {
274 struct vnode *a_vp;
275 int a_mode;
276 struct ucred *a_cred;
277 struct proc *a_p;
278 } */ *ap;
279{
280 register struct vnode *vp = ap->a_vp;
281 register u_int32_t *tl;
282 register caddr_t cp;
283 register int32_t t1, t2;
284 caddr_t bpos, dpos, cp2;
285 int error = 0, attrflag;
286 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
287 u_int32_t mode, rmode, wmode;
288 int v3 = NFS_ISV3(vp);
289 struct nfsnode *np = VTONFS(vp);
290
291 /*
292 * Disallow write attempts on filesystems mounted read-only;
293 * unless the file is a socket, fifo, or a block or character
294 * device resident on the filesystem.
295 */
296 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
297 switch (vp->v_type) {
298 case VREG:
299 case VDIR:
300 case VLNK:
301 return (EROFS);
302 default:
303 break;
304 }
305 }
306 /*
307 * For nfs v3, check to see if we have done this recently, and if
308 * so return our cached result instead of making an ACCESS call.
309	 * If not, do an access rpc. For nfs v2 you are stuck emulating
310	 * ufs_access() locally using the vattr. This may not be correct,
311 * since the server may apply other access criteria such as
312 * client uid-->server uid mapping that we do not know about.
313 */
314 if (v3) {
315 if (ap->a_mode & VREAD)
316 mode = NFSV3ACCESS_READ;
317 else
318 mode = 0;
319 if (vp->v_type != VDIR) {
320 if (ap->a_mode & VWRITE)
321 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
322 if (ap->a_mode & VEXEC)
323 mode |= NFSV3ACCESS_EXECUTE;
324 } else {
325 if (ap->a_mode & VWRITE)
326 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
327 NFSV3ACCESS_DELETE);
328 if (ap->a_mode & VEXEC)
329 mode |= NFSV3ACCESS_LOOKUP;
330 }
331 /* XXX safety belt, only make blanket request if caching */
332 if (nfsaccess_cache_timeout > 0) {
333 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
334 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
335 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
336 } else {
337 wmode = mode;
338 }
339
340 /*
341 * Does our cached result allow us to give a definite yes to
342 * this request?
343 */
344 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
345 (ap->a_cred->cr_uid == np->n_modeuid) &&
346 ((np->n_mode & mode) == mode)) {
347 nfsaccess_cache_hits++;
348 } else {
349 /*
350 * Either a no, or a don't know. Go to the wire.
351 */
352 nfsstats.rpccnt[NFSPROC_ACCESS]++;
353 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
354 nfsm_fhtom(vp, v3);
355 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
356 *tl = txdr_unsigned(wmode);
357 nfsm_request(vp, NFSPROC_ACCESS, ap->a_p, ap->a_cred);
358 nfsm_postop_attr(vp, attrflag);
359 if (!error) {
360 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
361 rmode = fxdr_unsigned(u_int32_t, *tl);
362 /*
363 * The NFS V3 spec does not clarify whether or not
364 * the returned access bits can be a superset of
365 * the ones requested, so...
366 */
367 if ((rmode & mode) != mode) {
368 error = EACCES;
369 } else if (nfsaccess_cache_timeout > 0) {
370 /* cache the result */
371 nfsaccess_cache_fills++;
372 np->n_mode = rmode;
373 np->n_modeuid = ap->a_cred->cr_uid;
374 np->n_modestamp = time_second;
375 }
376 }
377 nfsm_reqdone;
378 }
379 return (error);
380 } else {
381 if ((error = nfsspec_access(ap)) != 0)
382 return (error);
383
384 /*
385 * Attempt to prevent a mapped root from accessing a file
386 * which it shouldn't. We try to read a byte from the file
387 * if the user is root and the file is not zero length.
388 * After calling nfsspec_access, we should have the correct
389 * file size cached.
390 */
391 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
392 && VTONFS(vp)->n_size > 0) {
393 struct iovec aiov;
394 struct uio auio;
395 char buf[1];
396
397 aiov.iov_base = buf;
398 aiov.iov_len = 1;
399 auio.uio_iov = &aiov;
400 auio.uio_iovcnt = 1;
401 auio.uio_offset = 0;
402 auio.uio_resid = 1;
403 auio.uio_segflg = UIO_SYSSPACE;
404 auio.uio_rw = UIO_READ;
405 auio.uio_procp = ap->a_p;
406
407 if (vp->v_type == VREG)
408 error = nfs_readrpc(vp, &auio, ap->a_cred);
409 else if (vp->v_type == VDIR) {
410 char* bp;
411 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
412 aiov.iov_base = bp;
413 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
414 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
415 free(bp, M_TEMP);
416 } else if (vp->v_type == VLNK)
417 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
418 else
419 error = EACCES;
420 }
421 return (error);
422 }
423}
424
425/*
426 * nfs open vnode op
427 * Check to see if the type is ok
428 * and that deletion is not in progress.
429 * For paged in text files, you will need to flush the page cache
430 * if consistency is lost.
431 */
432/* ARGSUSED */
433static int
434nfs_open(ap)
435 struct vop_open_args /* {
436 struct vnode *a_vp;
437 int a_mode;
438 struct ucred *a_cred;
439 struct proc *a_p;
440 } */ *ap;
441{
442 register struct vnode *vp = ap->a_vp;
443 struct nfsnode *np = VTONFS(vp);
444 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
445 struct vattr vattr;
446 int error;
447
448 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
449#ifdef DIAGNOSTIC
450 printf("open eacces vtyp=%d\n",vp->v_type);
451#endif
452 return (EACCES);
453 }
454 /*
455 * Get a valid lease. If cached data is stale, flush it.
456 */
457 if (nmp->nm_flag & NFSMNT_NQNFS) {
458 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
459 do {
460 error = nqnfs_getlease(vp, ND_READ, ap->a_cred,
461 ap->a_p);
462 } while (error == NQNFS_EXPIRED);
463 if (error)
464 return (error);
465 if (np->n_lrev != np->n_brev ||
466 (np->n_flag & NQNFSNONCACHE)) {
467 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
468 ap->a_p, 1)) == EINTR)
469 return (error);
470 np->n_brev = np->n_lrev;
471 }
472 }
473 } else {
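		/*
		 * Plain NFS: if we have modified the file locally, flush
		 * (and invalidate) the cached buffers and refresh the cached
		 * modify time; otherwise compare the server's modify time
		 * with the one we have cached and invalidate the buffer
		 * cache if it has changed.
		 */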
474 if (np->n_flag & NMODIFIED) {
475 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
476 ap->a_p, 1)) == EINTR)
477 return (error);
478 np->n_attrstamp = 0;
479 if (vp->v_type == VDIR)
480 np->n_direofoffset = 0;
481 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
482 if (error)
483 return (error);
484 np->n_mtime = vattr.va_mtime.tv_sec;
485 } else {
486 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
487 if (error)
488 return (error);
489 if (np->n_mtime != vattr.va_mtime.tv_sec) {
490 if (vp->v_type == VDIR)
491 np->n_direofoffset = 0;
492 if ((error = nfs_vinvalbuf(vp, V_SAVE,
493 ap->a_cred, ap->a_p, 1)) == EINTR)
494 return (error);
495 np->n_mtime = vattr.va_mtime.tv_sec;
496 }
497 }
498 }
499 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
500 np->n_attrstamp = 0; /* For Open/Close consistency */
501 return (0);
502}
503
504/*
505 * nfs close vnode op
506 * What an NFS client should do upon close after writing is a debatable issue.
507 * Most NFS clients push delayed writes to the server upon close, basically for
508 * two reasons:
509 * 1 - So that any write errors may be reported back to the client process
510 * doing the close system call. By far the two most likely errors are
511 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
512 * 2 - To put a worst case upper bound on cache inconsistency between
513 * multiple clients for the file.
514 * There is also a consistency problem for Version 2 of the protocol w.r.t.
515 * not being able to tell if other clients are writing a file concurrently,
516 * since there is no way of knowing if the changed modify time in the reply
517 * is only due to the write for this client.
518 * (NFS Version 3 provides weak cache consistency data in the reply that
519 * should be sufficient to detect and handle this case.)
520 *
521 * The current code does the following:
522 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
523 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
524 * or commit them (this satisfies 1 and 2 except for the
525 * case where the server crashes after this close but
526 * before the commit RPC, which is felt to be "good
527 *				     enough"). Changing the last argument to nfs_flush() to
528 * a 1 would force a commit operation, if it is felt a
529 * commit is necessary now.
530 * for NQNFS - do nothing now, since 2 is dealt with via leases and
531 * 1 should be dealt with via an fsync() system call for
532 * cases where write errors are important.
533 */
534/* ARGSUSED */
535static int
536nfs_close(ap)
537 struct vop_close_args /* {
538 struct vnodeop_desc *a_desc;
539 struct vnode *a_vp;
540 int a_fflag;
541 struct ucred *a_cred;
542 struct proc *a_p;
543 } */ *ap;
544{
545 register struct vnode *vp = ap->a_vp;
546 register struct nfsnode *np = VTONFS(vp);
547 int error = 0;
548
549 if (vp->v_type == VREG) {
550 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
551 (np->n_flag & NMODIFIED)) {
552 if (NFS_ISV3(vp)) {
553 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0);
554 np->n_flag &= ~NMODIFIED;
555 } else
556 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
557 np->n_attrstamp = 0;
558 }
559 if (np->n_flag & NWRITEERR) {
560 np->n_flag &= ~NWRITEERR;
561 error = np->n_error;
562 }
563 }
564 return (error);
565}
566
567/*
568 * nfs getattr call from vfs.
569 */
570static int
571nfs_getattr(ap)
572 struct vop_getattr_args /* {
573 struct vnode *a_vp;
574 struct vattr *a_vap;
575 struct ucred *a_cred;
576 struct proc *a_p;
577 } */ *ap;
578{
579 register struct vnode *vp = ap->a_vp;
580 register struct nfsnode *np = VTONFS(vp);
581 register caddr_t cp;
582 register u_int32_t *tl;
583 register int32_t t1, t2;
584 caddr_t bpos, dpos;
585 int error = 0;
586 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
587 int v3 = NFS_ISV3(vp);
588
589 /*
590 * Update local times for special files.
591 */
592 if (np->n_flag & (NACC | NUPD))
593 np->n_flag |= NCHG;
594 /*
595 * First look in the cache.
596 */
597 if (nfs_getattrcache(vp, ap->a_vap) == 0)
598 return (0);
599 nfsstats.rpccnt[NFSPROC_GETATTR]++;
600 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
601 nfsm_fhtom(vp, v3);
602 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred);
603 if (!error) {
604 nfsm_loadattr(vp, ap->a_vap);
605 }
606 nfsm_reqdone;
607 return (error);
608}
609
610/*
611 * nfs setattr call.
612 */
613static int
614nfs_setattr(ap)
615 struct vop_setattr_args /* {
616 struct vnodeop_desc *a_desc;
617 struct vnode *a_vp;
618 struct vattr *a_vap;
619 struct ucred *a_cred;
620 struct proc *a_p;
621 } */ *ap;
622{
623 register struct vnode *vp = ap->a_vp;
624 register struct nfsnode *np = VTONFS(vp);
625 register struct vattr *vap = ap->a_vap;
626 int error = 0;
627 u_quad_t tsize;
628
629#ifndef nolint
630 tsize = (u_quad_t)0;
631#endif
632
633 /*
634 * Setting of flags is not supported.
635 */
636 if (vap->va_flags != VNOVAL)
637 return (EOPNOTSUPP);
638
639 /*
640 * Disallow write attempts if the filesystem is mounted read-only.
641 */
642 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
643 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
644 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
645 (vp->v_mount->mnt_flag & MNT_RDONLY))
646 return (EROFS);
647 if (vap->va_size != VNOVAL) {
648 switch (vp->v_type) {
649 case VDIR:
650 return (EISDIR);
651 case VCHR:
652 case VBLK:
653 case VSOCK:
654 case VFIFO:
655 if (vap->va_mtime.tv_sec == VNOVAL &&
656 vap->va_atime.tv_sec == VNOVAL &&
657 vap->va_mode == (mode_t)VNOVAL &&
658 vap->va_uid == (uid_t)VNOVAL &&
659 vap->va_gid == (gid_t)VNOVAL)
660 return (0);
661 vap->va_size = VNOVAL;
662 break;
663 default:
664 /*
665 * Disallow write attempts if the filesystem is
666 * mounted read-only.
667 */
668 if (vp->v_mount->mnt_flag & MNT_RDONLY)
669 return (EROFS);
670 vnode_pager_setsize(vp, vap->va_size);
671 if (np->n_flag & NMODIFIED) {
672 if (vap->va_size == 0)
673 error = nfs_vinvalbuf(vp, 0,
674 ap->a_cred, ap->a_p, 1);
675 else
676 error = nfs_vinvalbuf(vp, V_SAVE,
677 ap->a_cred, ap->a_p, 1);
678 if (error) {
679 vnode_pager_setsize(vp, np->n_size);
680 return (error);
681 }
682 }
683 tsize = np->n_size;
684 np->n_size = np->n_vattr.va_size = vap->va_size;
685 };
686 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
687 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
688 vp->v_type == VREG &&
689 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
690 ap->a_p, 1)) == EINTR)
691 return (error);
692 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
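	/*
	 * If the SETATTR failed and a size change was requested, restore
	 * the locally cached size (saved above) and the pager's notion
	 * of it.
	 */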
693 if (error && vap->va_size != VNOVAL) {
694 np->n_size = np->n_vattr.va_size = tsize;
695 vnode_pager_setsize(vp, np->n_size);
696 }
697 return (error);
698}
699
700/*
701 * Do an nfs setattr rpc.
702 */
703static int
704nfs_setattrrpc(vp, vap, cred, procp)
705 register struct vnode *vp;
706 register struct vattr *vap;
707 struct ucred *cred;
708 struct proc *procp;
709{
710 register struct nfsv2_sattr *sp;
711 register caddr_t cp;
712 register int32_t t1, t2;
713 caddr_t bpos, dpos, cp2;
714 u_int32_t *tl;
715 int error = 0, wccflag = NFSV3_WCCRATTR;
716 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
717 int v3 = NFS_ISV3(vp);
718
719 nfsstats.rpccnt[NFSPROC_SETATTR]++;
720 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
721 nfsm_fhtom(vp, v3);
722 if (v3) {
723 nfsm_v3attrbuild(vap, TRUE);
724 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
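		/*
		 * Guard check is FALSE: we do not ask the server to verify
		 * the object's ctime before applying the new attributes.
		 */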
725 *tl = nfs_false;
726 } else {
727 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
728 if (vap->va_mode == (mode_t)VNOVAL)
729 sp->sa_mode = nfs_xdrneg1;
730 else
731 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
732 if (vap->va_uid == (uid_t)VNOVAL)
733 sp->sa_uid = nfs_xdrneg1;
734 else
735 sp->sa_uid = txdr_unsigned(vap->va_uid);
736 if (vap->va_gid == (gid_t)VNOVAL)
737 sp->sa_gid = nfs_xdrneg1;
738 else
739 sp->sa_gid = txdr_unsigned(vap->va_gid);
740 sp->sa_size = txdr_unsigned(vap->va_size);
741 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
742 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
743 }
744 nfsm_request(vp, NFSPROC_SETATTR, procp, cred);
745 if (v3) {
746 nfsm_wcc_data(vp, wccflag);
747 } else
748 nfsm_loadattr(vp, (struct vattr *)0);
749 nfsm_reqdone;
750 return (error);
751}
752
753/*
754 * nfs lookup call, one step at a time...
755 * First look in the cache.
756 * If not found, unlock the directory nfsnode and do the rpc.
757 */
758static int
759nfs_lookup(ap)
760 struct vop_lookup_args /* {
761 struct vnodeop_desc *a_desc;
762 struct vnode *a_dvp;
763 struct vnode **a_vpp;
764 struct componentname *a_cnp;
765 } */ *ap;
766{
767 struct componentname *cnp = ap->a_cnp;
768 struct vnode *dvp = ap->a_dvp;
769 struct vnode **vpp = ap->a_vpp;
770 int flags = cnp->cn_flags;
771 struct vnode *newvp;
772 u_int32_t *tl;
773 caddr_t cp;
774 int32_t t1, t2;
775 struct nfsmount *nmp;
776 caddr_t bpos, dpos, cp2;
777 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
778 long len;
779 nfsfh_t *fhp;
780 struct nfsnode *np;
781 int lockparent, wantparent, error = 0, attrflag, fhsize;
782 int v3 = NFS_ISV3(dvp);
783 struct proc *p = cnp->cn_proc;
784
785 *vpp = NULLVP;
786 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
787 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
788 return (EROFS);
789 if (dvp->v_type != VDIR)
790 return (ENOTDIR);
791 lockparent = flags & LOCKPARENT;
792 wantparent = flags & (LOCKPARENT|WANTPARENT);
793 nmp = VFSTONFS(dvp->v_mount);
794 np = VTONFS(dvp);
795 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
796 struct vattr vattr;
797 int vpid;
798
799 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) != 0) {
800 *vpp = NULLVP;
801 return (error);
802 }
803
804 newvp = *vpp;
805 vpid = newvp->v_id;
806 /*
807 * See the comment starting `Step through' in ufs/ufs_lookup.c
808 * for an explanation of the locking protocol
809 */
810 if (dvp == newvp) {
811 VREF(newvp);
812 error = 0;
813 } else if (flags & ISDOTDOT) {
814 VOP_UNLOCK(dvp, 0, p);
815 error = vget(newvp, LK_EXCLUSIVE, p);
816 if (!error && lockparent && (flags & ISLASTCN))
817 error = vn_lock(dvp, LK_EXCLUSIVE, p);
818 } else {
819 error = vget(newvp, LK_EXCLUSIVE, p);
820 if (!lockparent || error || !(flags & ISLASTCN))
821 VOP_UNLOCK(dvp, 0, p);
822 }
823 if (!error) {
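			/*
			 * Only trust the name cache hit if the vnode has not
			 * been recycled (v_id unchanged) and its ctime still
			 * matches the value recorded when the entry was made;
			 * otherwise fall through and redo the LOOKUP RPC.
			 */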
824 if (vpid == newvp->v_id) {
825 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
826 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
827 nfsstats.lookupcache_hits++;
828 if (cnp->cn_nameiop != LOOKUP &&
829 (flags & ISLASTCN))
830 cnp->cn_flags |= SAVENAME;
831 return (0);
832 }
833 cache_purge(newvp);
834 }
835 vput(newvp);
836 if (lockparent && dvp != newvp && (flags & ISLASTCN))
837 VOP_UNLOCK(dvp, 0, p);
838 }
839 error = vn_lock(dvp, LK_EXCLUSIVE, p);
840 *vpp = NULLVP;
841 if (error)
842 return (error);
843 }
844 error = 0;
845 newvp = NULLVP;
846 nfsstats.lookupcache_misses++;
847 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
848 len = cnp->cn_namelen;
849 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
850 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
851 nfsm_fhtom(dvp, v3);
852 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
853 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred);
854 if (error) {
855 nfsm_postop_attr(dvp, attrflag);
856 m_freem(mrep);
857 goto nfsmout;
858 }
859 nfsm_getfh(fhp, fhsize, v3);
860
861 /*
862 * Handle RENAME case...
863 */
864 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
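		/*
		 * If the file handle returned for the last component is the
		 * directory's own handle, the name refers to the directory
		 * itself and the rename is refused with EISDIR.
		 */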
865 if (NFS_CMPFH(np, fhp, fhsize)) {
866 m_freem(mrep);
867 return (EISDIR);
868 }
869 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
870 if (error) {
871 m_freem(mrep);
872 return (error);
873 }
874 newvp = NFSTOV(np);
875 if (v3) {
876 nfsm_postop_attr(newvp, attrflag);
877 nfsm_postop_attr(dvp, attrflag);
878 } else
879 nfsm_loadattr(newvp, (struct vattr *)0);
880 *vpp = newvp;
881 m_freem(mrep);
882 cnp->cn_flags |= SAVENAME;
883 if (!lockparent)
884 VOP_UNLOCK(dvp, 0, p);
885 return (0);
886 }
887
888 if (flags & ISDOTDOT) {
889 VOP_UNLOCK(dvp, 0, p);
890 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
891 if (error) {
892 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
893 return (error);
894 }
895 newvp = NFSTOV(np);
896 if (lockparent && (flags & ISLASTCN) &&
897 (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
898 vput(newvp);
899 return (error);
900 }
901 } else if (NFS_CMPFH(np, fhp, fhsize)) {
902 VREF(dvp);
903 newvp = dvp;
904 } else {
905 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
906 if (error) {
907 m_freem(mrep);
908 return (error);
909 }
910 if (!lockparent || !(flags & ISLASTCN))
911 VOP_UNLOCK(dvp, 0, p);
912 newvp = NFSTOV(np);
913 }
914 if (v3) {
915 nfsm_postop_attr(newvp, attrflag);
916 nfsm_postop_attr(dvp, attrflag);
917 } else
918 nfsm_loadattr(newvp, (struct vattr *)0);
919 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
920 cnp->cn_flags |= SAVENAME;
921 if ((cnp->cn_flags & MAKEENTRY) &&
922 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
923 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
924 cache_enter(dvp, newvp, cnp);
925 }
926 *vpp = newvp;
927 nfsm_reqdone;
928 if (error) {
929 if (newvp != NULLVP) {
930 vrele(newvp);
931 *vpp = NULLVP;
932 }
933 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
934 (flags & ISLASTCN) && error == ENOENT) {
935 if (!lockparent)
936 VOP_UNLOCK(dvp, 0, p);
937 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
938 error = EROFS;
939 else
940 error = EJUSTRETURN;
941 }
942 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
943 cnp->cn_flags |= SAVENAME;
944 }
945 return (error);
946}
947
948/*
949 * nfs read call.
950 * Just call nfs_bioread() to do the work.
951 */
952static int
953nfs_read(ap)
954 struct vop_read_args /* {
955 struct vnode *a_vp;
956 struct uio *a_uio;
957 int a_ioflag;
958 struct ucred *a_cred;
959 } */ *ap;
960{
961 register struct vnode *vp = ap->a_vp;
962
963 if (vp->v_type != VREG)
964 return (EPERM);
965 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
966}
967
968/*
969 * nfs readlink call
970 */
971static int
972nfs_readlink(ap)
973 struct vop_readlink_args /* {
974 struct vnode *a_vp;
975 struct uio *a_uio;
976 struct ucred *a_cred;
977 } */ *ap;
978{
979 register struct vnode *vp = ap->a_vp;
980
981 if (vp->v_type != VLNK)
982 return (EINVAL);
983 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred));
984}
985
986/*
987 * Do a readlink rpc.
988 * Called by nfs_doio() from below the buffer cache.
989 */
990int
991nfs_readlinkrpc(vp, uiop, cred)
992 register struct vnode *vp;
993 struct uio *uiop;
994 struct ucred *cred;
995{
996 register u_int32_t *tl;
997 register caddr_t cp;
998 register int32_t t1, t2;
999 caddr_t bpos, dpos, cp2;
1000 int error = 0, len, attrflag;
1001 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1002 int v3 = NFS_ISV3(vp);
1003
1004 nfsstats.rpccnt[NFSPROC_READLINK]++;
1005 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1006 nfsm_fhtom(vp, v3);
1007 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred);
1008 if (v3)
1009 nfsm_postop_attr(vp, attrflag);
1010 if (!error) {
1011 nfsm_strsiz(len, NFS_MAXPATHLEN);
1012 nfsm_mtouio(uiop, len);
1013 }
1014 nfsm_reqdone;
1015 return (error);
1016}
1017
1018/*
1019 * nfs read rpc call
1020 * Ditto above
1021 */
1022int
1023nfs_readrpc(vp, uiop, cred)
1024 register struct vnode *vp;
1025 struct uio *uiop;
1026 struct ucred *cred;
1027{
1028 register u_int32_t *tl;
1029 register caddr_t cp;
1030 register int32_t t1, t2;
1031 caddr_t bpos, dpos, cp2;
1032 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1033 struct nfsmount *nmp;
1034 int error = 0, len, retlen, tsiz, eof, attrflag;
1035 int v3 = NFS_ISV3(vp);
1036
1037#ifndef nolint
1038 eof = 0;
1039#endif
1040 nmp = VFSTONFS(vp->v_mount);
1041 tsiz = uiop->uio_resid;
1042 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1043 return (EFBIG);
1044 while (tsiz > 0) {
1045 nfsstats.rpccnt[NFSPROC_READ]++;
1046 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1047 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1048 nfsm_fhtom(vp, v3);
1049 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1050 if (v3) {
1051 txdr_hyper(&uiop->uio_offset, tl);
1052 *(tl + 2) = txdr_unsigned(len);
1053 } else {
1054 *tl++ = txdr_unsigned(uiop->uio_offset);
1055 *tl++ = txdr_unsigned(len);
1056 *tl = 0;
1057 }
1058 nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred);
1059 if (v3) {
1060 nfsm_postop_attr(vp, attrflag);
1061 if (error) {
1062 m_freem(mrep);
1063 goto nfsmout;
1064 }
1065 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1066 eof = fxdr_unsigned(int, *(tl + 1));
1067 } else
1068 nfsm_loadattr(vp, (struct vattr *)0);
1069 nfsm_strsiz(retlen, nmp->nm_rsize);
1070 nfsm_mtouio(uiop, retlen);
1071 m_freem(mrep);
1072 tsiz -= retlen;
1073 if (v3) {
1074 if (eof || retlen == 0)
1075 tsiz = 0;
1076 } else if (retlen < len)
1077 tsiz = 0;
1078 }
1079nfsmout:
1080 return (error);
1081}
1082
1083/*
1084 * nfs write call
1085 */
1086int
1087nfs_writerpc(vp, uiop, cred, iomode, must_commit)
1088 register struct vnode *vp;
1089 register struct uio *uiop;
1090 struct ucred *cred;
1091 int *iomode, *must_commit;
1092{
1093 register u_int32_t *tl;
1094 register caddr_t cp;
1095 register int32_t t1, t2, backup;
1096 caddr_t bpos, dpos, cp2;
1097 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1098 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1099 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1100 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1101
1102#ifndef DIAGNOSTIC
1103 if (uiop->uio_iovcnt != 1)
1104 panic("nfs: writerpc iovcnt > 1");
1105#endif
1106 *must_commit = 0;
1107 tsiz = uiop->uio_resid;
1108 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1109 return (EFBIG);
1110 while (tsiz > 0) {
1111 nfsstats.rpccnt[NFSPROC_WRITE]++;
1112 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1113 nfsm_reqhead(vp, NFSPROC_WRITE,
1114 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1115 nfsm_fhtom(vp, v3);
1116 if (v3) {
1117 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1118 txdr_hyper(&uiop->uio_offset, tl);
1119 tl += 2;
1120 *tl++ = txdr_unsigned(len);
1121 *tl++ = txdr_unsigned(*iomode);
1122 *tl = txdr_unsigned(len);
1123 } else {
1124 register u_int32_t x;
1125
1126 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1127 /* Set both "begin" and "current" to non-garbage. */
1128 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1129 *tl++ = x; /* "begin offset" */
1130 *tl++ = x; /* "current offset" */
1131 x = txdr_unsigned(len);
1132 *tl++ = x; /* total to this offset */
1133 *tl = x; /* size of this write */
1134 }
1135 nfsm_uiotom(uiop, len);
1136 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred);
1137 if (v3) {
1138 wccflag = NFSV3_WCCCHK;
1139 nfsm_wcc_data(vp, wccflag);
1140 if (!error) {
1141 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1142 + NFSX_V3WRITEVERF);
1143 rlen = fxdr_unsigned(int, *tl++);
1144 if (rlen == 0) {
1145 error = NFSERR_IO;
1146 m_freem(mrep);
1147 break;
1148 } else if (rlen < len) {
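					/*
					 * Short write: the server accepted
					 * only rlen bytes, so back the uio up
					 * over the unwritten bytes and send
					 * them again on the next pass through
					 * the loop.
					 */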
1149 backup = len - rlen;
1150 uiop->uio_iov->iov_base -= backup;
1151 uiop->uio_iov->iov_len += backup;
1152 uiop->uio_offset -= backup;
1153 uiop->uio_resid += backup;
1154 len = rlen;
1155 }
1156 commit = fxdr_unsigned(int, *tl++);
1157
1158 /*
1159				 * Return the lowest commitment level
1160 * obtained by any of the RPCs.
1161 */
1162 if (committed == NFSV3WRITE_FILESYNC)
1163 committed = commit;
1164 else if (committed == NFSV3WRITE_DATASYNC &&
1165 commit == NFSV3WRITE_UNSTABLE)
1166 committed = commit;
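				/*
				 * Save the write verifier from the first
				 * reply. If a later reply carries a different
				 * verifier the server has rebooted, so data
				 * written UNSTABLE may have been lost;
				 * *must_commit tells the caller about it.
				 */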
1167 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1168 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1169 NFSX_V3WRITEVERF);
1170 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1171 } else if (bcmp((caddr_t)tl,
1172 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1173 *must_commit = 1;
1174 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1175 NFSX_V3WRITEVERF);
1176 }
1177 }
1178 } else
1179 nfsm_loadattr(vp, (struct vattr *)0);
1180 if (wccflag)
1181 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1182 m_freem(mrep);
1183 if (error)
1184 break;
1185 tsiz -= len;
1186 }
1187nfsmout:
1188 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1189 committed = NFSV3WRITE_FILESYNC;
1190 *iomode = committed;
1191 if (error)
1192 uiop->uio_resid = tsiz;
1193 return (error);
1194}
1195
1196/*
1197 * nfs mknod rpc
1198 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1199 * mode set to specify the file type and the size field for rdev.
1200 */
1201static int
1202nfs_mknodrpc(dvp, vpp, cnp, vap)
1203 register struct vnode *dvp;
1204 register struct vnode **vpp;
1205 register struct componentname *cnp;
1206 register struct vattr *vap;
1207{
1208 register struct nfsv2_sattr *sp;
1209 register u_int32_t *tl;
1210 register caddr_t cp;
1211 register int32_t t1, t2;
1212 struct vnode *newvp = (struct vnode *)0;
1213 struct nfsnode *np = (struct nfsnode *)0;
1214 struct vattr vattr;
1215 char *cp2;
1216 caddr_t bpos, dpos;
1217 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1218 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1219 u_int32_t rdev;
1220 int v3 = NFS_ISV3(dvp);
1221
1222 if (vap->va_type == VCHR || vap->va_type == VBLK)
1223 rdev = txdr_unsigned(vap->va_rdev);
1224 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1225 rdev = nfs_xdrneg1;
1226 else {
1227 VOP_ABORTOP(dvp, cnp);
1228 return (EOPNOTSUPP);
1229 }
1230 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1231 VOP_ABORTOP(dvp, cnp);
1232 return (error);
1233 }
1234 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1235 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1236 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1237 nfsm_fhtom(dvp, v3);
1238 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1239 if (v3) {
1240 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1241 *tl++ = vtonfsv3_type(vap->va_type);
1242 nfsm_v3attrbuild(vap, FALSE);
1243 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1244 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1245 *tl++ = txdr_unsigned(major(vap->va_rdev));
1246 *tl = txdr_unsigned(minor(vap->va_rdev));
1247 }
1248 } else {
1249 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1250 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1251 sp->sa_uid = nfs_xdrneg1;
1252 sp->sa_gid = nfs_xdrneg1;
1253 sp->sa_size = rdev;
1254 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1255 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1256 }
1257 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred);
1258 if (!error) {
1259 nfsm_mtofh(dvp, newvp, v3, gotvp);
1260 if (!gotvp) {
1261 if (newvp) {
1262 vput(newvp);
1263 newvp = (struct vnode *)0;
1264 }
1265 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1266 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1267 if (!error)
1268 newvp = NFSTOV(np);
1269 }
1270 }
1271 if (v3)
1272 nfsm_wcc_data(dvp, wccflag);
1273 nfsm_reqdone;
1274 if (error) {
1275 if (newvp)
1276 vput(newvp);
1277 } else {
1278 if (cnp->cn_flags & MAKEENTRY)
1279 cache_enter(dvp, newvp, cnp);
1280 *vpp = newvp;
1281 }
1282 zfree(namei_zone, cnp->cn_pnbuf);
1283 VTONFS(dvp)->n_flag |= NMODIFIED;
1284 if (!wccflag)
1285 VTONFS(dvp)->n_attrstamp = 0;
1286 return (error);
1287}
1288
1289/*
1290 * nfs mknod vop
1291 * just call nfs_mknodrpc() to do the work.
1292 */
1293/* ARGSUSED */
1294static int
1295nfs_mknod(ap)
1296 struct vop_mknod_args /* {
1297 struct vnode *a_dvp;
1298 struct vnode **a_vpp;
1299 struct componentname *a_cnp;
1300 struct vattr *a_vap;
1301 } */ *ap;
1302{
1303 struct vnode *newvp;
1304 int error;
1305
1306 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap);
1307 if (!error)
1308 vput(newvp);
1309 return (error);
1310}
1311
1312static u_long create_verf;
1313/*
1314 * nfs file create call
1315 */
1316static int
1317nfs_create(ap)
1318 struct vop_create_args /* {
1319 struct vnode *a_dvp;
1320 struct vnode **a_vpp;
1321 struct componentname *a_cnp;
1322 struct vattr *a_vap;
1323 } */ *ap;
1324{
1325 register struct vnode *dvp = ap->a_dvp;
1326 register struct vattr *vap = ap->a_vap;
1327 register struct componentname *cnp = ap->a_cnp;
1328 register struct nfsv2_sattr *sp;
1329 register u_int32_t *tl;
1330 register caddr_t cp;
1331 register int32_t t1, t2;
1332 struct nfsnode *np = (struct nfsnode *)0;
1333 struct vnode *newvp = (struct vnode *)0;
1334 caddr_t bpos, dpos, cp2;
1335 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1336 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1337 struct vattr vattr;
1338 int v3 = NFS_ISV3(dvp);
1339
1340 /*
1341 * Oops, not for me..
1342 */
1343 if (vap->va_type == VSOCK)
1344 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1345
1346 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1347 VOP_ABORTOP(dvp, cnp);
1348 return (error);
1349 }
1350 if (vap->va_vaflags & VA_EXCLUSIVE)
1351 fmode |= O_EXCL;
1352again:
1353 nfsstats.rpccnt[NFSPROC_CREATE]++;
1354 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1355 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1356 nfsm_fhtom(dvp, v3);
1357 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1358 if (v3) {
1359 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1360 if (fmode & O_EXCL) {
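 /*
 * For an exclusive create, build an 8 byte verifier that should be
 * unique to this client: the primary IP address (when one is
 * configured) plus a bumped counter.  The server uses it to tell a
 * retransmission of this request from a create by another client.
 */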
1361 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1362 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1363#ifdef INET
1364 if (!TAILQ_EMPTY(&in_ifaddrhead))
1365 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
1366 else
1367#endif
1368 *tl++ = create_verf;
1369 *tl = ++create_verf;
1370 } else {
1371 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1372 nfsm_v3attrbuild(vap, FALSE);
1373 }
1374 } else {
1375 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1376 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1377 sp->sa_uid = nfs_xdrneg1;
1378 sp->sa_gid = nfs_xdrneg1;
1379 sp->sa_size = 0;
1380 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1381 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1382 }
1383 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred);
1384 if (!error) {
1385 nfsm_mtofh(dvp, newvp, v3, gotvp);
1386 if (!gotvp) {
1387 if (newvp) {
1388 vput(newvp);
1389 newvp = (struct vnode *)0;
1390 }
1391 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1392 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1393 if (!error)
1394 newvp = NFSTOV(np);
1395 }
1396 }
1397 if (v3)
1398 nfsm_wcc_data(dvp, wccflag);
1399 nfsm_reqdone;
1400 if (error) {
1401 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1402 fmode &= ~O_EXCL;
1403 goto again;
1404 }
1405 if (newvp)
1406 vput(newvp);
1407 } else if (v3 && (fmode & O_EXCL))
1408 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
1409 if (!error) {
1410 if (cnp->cn_flags & MAKEENTRY)
1411 cache_enter(dvp, newvp, cnp);
1412 *ap->a_vpp = newvp;
1413 }
1414 if (error || (cnp->cn_flags & SAVESTART) == 0)
1415 zfree(namei_zone, cnp->cn_pnbuf);
1416 VTONFS(dvp)->n_flag |= NMODIFIED;
1417 if (!wccflag)
1418 VTONFS(dvp)->n_attrstamp = 0;
1419 return (error);
1420}
1421
1422/*
1423 * nfs file remove call
1424 * To try and make nfs semantics closer to ufs semantics, a file that has
1425 * other processes using the vnode is renamed instead of removed and then
1426 * removed later on the last close.
1427 * - If v_usecount > 1
1428 * If a rename is not already in the works
1429 * call nfs_sillyrename() to set it up
1430 * else
1431 * do the remove rpc
1432 */
1433static int
1434nfs_remove(ap)
1435 struct vop_remove_args /* {
1436 struct vnodeop_desc *a_desc;
1437 struct vnode * a_dvp;
1438 struct vnode * a_vp;
1439 struct componentname * a_cnp;
1440 } */ *ap;
1441{
1442 register struct vnode *vp = ap->a_vp;
1443 register struct vnode *dvp = ap->a_dvp;
1444 register struct componentname *cnp = ap->a_cnp;
1445 register struct nfsnode *np = VTONFS(vp);
1446 int error = 0;
1447 struct vattr vattr;
1448
1449#ifndef DIAGNOSTIC
1450 if ((cnp->cn_flags & HASBUF) == 0)
1451 panic("nfs_remove: no name");
1452 if (vp->v_usecount < 1)
1453 panic("nfs_remove: bad v_usecount");
1454#endif
1455 if (vp->v_type == VDIR)
1456 error = EPERM;
1457 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1458 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
1459 vattr.va_nlink > 1)) {
1460 /*
1461 * Purge the name cache so that the chance of a lookup for
1462 * the name succeeding while the remove is in progress is
1463 * minimized. Without node locking it can still happen, such
1464 * that an I/O op returns ESTALE, but the same thing can happen
1465 * whenever another host removes the file, so it is tolerated.
1466 */
1467 cache_purge(vp);
1468 /*
1469 * throw away biocache buffers, mainly to avoid
1470 * unnecessary delayed writes later.
1471 */
1472 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
1473 /* Do the rpc */
1474 if (error != EINTR)
1475 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1476 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
1477 /*
1478 * Kludge City: If the first reply to the remove rpc is lost,
1479 *   the reply to the retransmitted request will be ENOENT,
1480 *   since the file was in fact removed.
1481 *   Therefore, we cheat and return success.
1482 */
1483 if (error == ENOENT)
1484 error = 0;
1485 } else if (!np->n_sillyrename)
1486 error = nfs_sillyrename(dvp, vp, cnp);
1487 zfree(namei_zone, cnp->cn_pnbuf);
1488 np->n_attrstamp = 0;
1489 return (error);
1490}
1491
1492/*
1493 * nfs file remove rpc called from nfs_inactive
1494 */
1495int
1496nfs_removeit(sp)
1497 register struct sillyrename *sp;
1498{
1499
1500 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1501 (struct proc *)0));
1502}
1503
1504/*
1505 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1506 */
1507static int
1508nfs_removerpc(dvp, name, namelen, cred, proc)
1509 register struct vnode *dvp;
1510 const char *name;
1511 int namelen;
1512 struct ucred *cred;
1513 struct proc *proc;
1514{
1515 register u_int32_t *tl;
1516 register caddr_t cp;
1517 register int32_t t1, t2;
1518 caddr_t bpos, dpos, cp2;
1519 int error = 0, wccflag = NFSV3_WCCRATTR;
1520 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1521 int v3 = NFS_ISV3(dvp);
1522
1523 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1524 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1525 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1526 nfsm_fhtom(dvp, v3);
1527 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1528 nfsm_request(dvp, NFSPROC_REMOVE, proc, cred);
1529 if (v3)
1530 nfsm_wcc_data(dvp, wccflag);
1531 nfsm_reqdone;
1532 VTONFS(dvp)->n_flag |= NMODIFIED;
1533 if (!wccflag)
1534 VTONFS(dvp)->n_attrstamp = 0;
1535 return (error);
1536}
1537
1538/*
1539 * nfs file rename call
1540 */
1541static int
1542nfs_rename(ap)
1543 struct vop_rename_args /* {
1544 struct vnode *a_fdvp;
1545 struct vnode *a_fvp;
1546 struct componentname *a_fcnp;
1547 struct vnode *a_tdvp;
1548 struct vnode *a_tvp;
1549 struct componentname *a_tcnp;
1550 } */ *ap;
1551{
1552 register struct vnode *fvp = ap->a_fvp;
1553 register struct vnode *tvp = ap->a_tvp;
1554 register struct vnode *fdvp = ap->a_fdvp;
1555 register struct vnode *tdvp = ap->a_tdvp;
1556 register struct componentname *tcnp = ap->a_tcnp;
1557 register struct componentname *fcnp = ap->a_fcnp;
1558 int error;
1559
1560#ifndef DIAGNOSTIC
1561 if ((tcnp->cn_flags & HASBUF) == 0 ||
1562 (fcnp->cn_flags & HASBUF) == 0)
1563 panic("nfs_rename: no name");
1564#endif
1565 /* Check for cross-device rename */
1566 if ((fvp->v_mount != tdvp->v_mount) ||
1567 (tvp && (fvp->v_mount != tvp->v_mount))) {
1568 error = EXDEV;
1569 goto out;
1570 }
1571
1572 /*
1573 * We have to flush B_DELWRI data prior to renaming
1574 * the file. If we don't, the delayed-write buffers
1575 * can be flushed out later after the file has gone stale
1576 * under NFSV3. NFSV2 does not have this problem because
1577 * ( as far as I can tell ) it flushes dirty buffers more
1578 * often.
1579 */
1580
1581 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_proc);
1582 if (tvp)
1583 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_proc);
1584
1585 /*
1586 * If the tvp exists and is in use, sillyrename it before doing the
1587 * rename of the new file over it.
1588 * XXX Can't sillyrename a directory.
1589 */
1590 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1591 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1592 vput(tvp);
1593 tvp = NULL;
1594 }
1595
1596 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1597 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1598 tcnp->cn_proc);
1599
1600 if (fvp->v_type == VDIR) {
1601 if (tvp != NULL && tvp->v_type == VDIR)
1602 cache_purge(tdvp);
1603 cache_purge(fdvp);
1604 }
1605
1606out:
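 /*
 * Release the vnodes and pathname buffers handed to us by the caller,
 * as the VOP_RENAME() calling convention requires, whether or not the
 * rename rpc succeeded.
 */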
1607 VOP_ABORTOP(tdvp, tcnp);
1608 if (tdvp == tvp)
1609 vrele(tdvp);
1610 else
1611 vput(tdvp);
1612 if (tvp)
1613 vput(tvp);
1614 VOP_ABORTOP(fdvp, fcnp);
1615 vrele(fdvp);
1616 vrele(fvp);
1617 /*
1618 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1619 */
1620 if (error == ENOENT)
1621 error = 0;
1622 return (error);
1623}
1624
1625/*
1626 * nfs file rename rpc called from nfs_remove() above
1627 */
1628static int
1629nfs_renameit(sdvp, scnp, sp)
1630 struct vnode *sdvp;
1631 struct componentname *scnp;
1632 register struct sillyrename *sp;
1633{
1634 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1635 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc));
1636}
1637
1638/*
1639 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1640 */
1641static int
1642nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc)
1643 register struct vnode *fdvp;
1644 const char *fnameptr;
1645 int fnamelen;
1646 register struct vnode *tdvp;
1647 const char *tnameptr;
1648 int tnamelen;
1649 struct ucred *cred;
1650 struct proc *proc;
1651{
1652 register u_int32_t *tl;
1653 register caddr_t cp;
1654 register int32_t t1, t2;
1655 caddr_t bpos, dpos, cp2;
1656 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1657 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1658 int v3 = NFS_ISV3(fdvp);
1659
1660 nfsstats.rpccnt[NFSPROC_RENAME]++;
1661 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1662 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1663 nfsm_rndup(tnamelen));
1664 nfsm_fhtom(fdvp, v3);
1665 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1666 nfsm_fhtom(tdvp, v3);
1667 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1668 nfsm_request(fdvp, NFSPROC_RENAME, proc, cred);
1669 if (v3) {
1670 nfsm_wcc_data(fdvp, fwccflag);
1671 nfsm_wcc_data(tdvp, twccflag);
1672 }
1673 nfsm_reqdone;
1674 VTONFS(fdvp)->n_flag |= NMODIFIED;
1675 VTONFS(tdvp)->n_flag |= NMODIFIED;
1676 if (!fwccflag)
1677 VTONFS(fdvp)->n_attrstamp = 0;
1678 if (!twccflag)
1679 VTONFS(tdvp)->n_attrstamp = 0;
1680 return (error);
1681}
1682
1683/*
1684 * nfs hard link create call
1685 */
1686static int
1687nfs_link(ap)
1688 struct vop_link_args /* {
1689 struct vnode *a_tdvp;
1690 struct vnode *a_vp;
1691 struct componentname *a_cnp;
1692 } */ *ap;
1693{
1694 register struct vnode *vp = ap->a_vp;
1695 register struct vnode *tdvp = ap->a_tdvp;
1696 register struct componentname *cnp = ap->a_cnp;
1697 register u_int32_t *tl;
1698 register caddr_t cp;
1699 register int32_t t1, t2;
1700 caddr_t bpos, dpos, cp2;
1701 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1702 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1703 int v3;
1704
1705 if (vp->v_mount != tdvp->v_mount) {
1706 VOP_ABORTOP(tdvp, cnp);
1707 return (EXDEV);
1708 }
1709
1710 /*
1711 * Push all writes to the server, so that the attribute cache
1712 * doesn't get "out of sync" with the server.
1713 * XXX There should be a better way!
1714 */
1715 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);
1716
1717 v3 = NFS_ISV3(vp);
1718 nfsstats.rpccnt[NFSPROC_LINK]++;
1719 nfsm_reqhead(vp, NFSPROC_LINK,
1720 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1721 nfsm_fhtom(vp, v3);
1722 nfsm_fhtom(tdvp, v3);
1723 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1724 nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
1725 if (v3) {
1726 nfsm_postop_attr(vp, attrflag);
1727 nfsm_wcc_data(tdvp, wccflag);
1728 }
1729 nfsm_reqdone;
1730 zfree(namei_zone, cnp->cn_pnbuf);
1731 VTONFS(tdvp)->n_flag |= NMODIFIED;
1732 if (!attrflag)
1733 VTONFS(vp)->n_attrstamp = 0;
1734 if (!wccflag)
1735 VTONFS(tdvp)->n_attrstamp = 0;
1736 /*
1737 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1738 */
1739 if (error == EEXIST)
1740 error = 0;
1741 return (error);
1742}
1743
1744/*
1745 * nfs symbolic link create call
1746 */
1747static int
1748nfs_symlink(ap)
1749 struct vop_symlink_args /* {
1750 struct vnode *a_dvp;
1751 struct vnode **a_vpp;
1752 struct componentname *a_cnp;
1753 struct vattr *a_vap;
1754 char *a_target;
1755 } */ *ap;
1756{
1757 register struct vnode *dvp = ap->a_dvp;
1758 register struct vattr *vap = ap->a_vap;
1759 register struct componentname *cnp = ap->a_cnp;
1760 register struct nfsv2_sattr *sp;
1761 register u_int32_t *tl;
1762 register caddr_t cp;
1763 register int32_t t1, t2;
1764 caddr_t bpos, dpos, cp2;
1765 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1766 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1767 struct vnode *newvp = (struct vnode *)0;
1768 int v3 = NFS_ISV3(dvp);
1769
1770 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1771 slen = strlen(ap->a_target);
1772 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1773 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1774 nfsm_fhtom(dvp, v3);
1775 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1776 if (v3) {
1777 nfsm_v3attrbuild(vap, FALSE);
1778 }
1779 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1780 if (!v3) {
1781 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1782 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1783 sp->sa_uid = nfs_xdrneg1;
1784 sp->sa_gid = nfs_xdrneg1;
1785 sp->sa_size = nfs_xdrneg1;
1786 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1787 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1788 }
1789 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred);
1790 if (v3) {
1791 if (!error)
1792 nfsm_mtofh(dvp, newvp, v3, gotvp);
1793 nfsm_wcc_data(dvp, wccflag);
1794 }
1795 nfsm_reqdone;
1796 if (newvp)
1797 vput(newvp);
1798 VTONFS(dvp)->n_flag |= NMODIFIED;
1799 if (!wccflag)
1800 VTONFS(dvp)->n_attrstamp = 0;
1801 /*
1802 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1803 */
1804 if (error == EEXIST)
1805 error = 0;
1806 /*
1807 * cnp's buffer expected to be freed if SAVESTART not set or
1808 * if an error was returned.
1809 */
1810 if (error || (cnp->cn_flags & SAVESTART) == 0)
1811 zfree(namei_zone, cnp->cn_pnbuf);
1812 return (error);
1813}
1814
1815/*
1816 * nfs make dir call
1817 */
1818static int
1819nfs_mkdir(ap)
1820 struct vop_mkdir_args /* {
1821 struct vnode *a_dvp;
1822 struct vnode **a_vpp;
1823 struct componentname *a_cnp;
1824 struct vattr *a_vap;
1825 } */ *ap;
1826{
1827 register struct vnode *dvp = ap->a_dvp;
1828 register struct vattr *vap = ap->a_vap;
1829 register struct componentname *cnp = ap->a_cnp;
1830 register struct nfsv2_sattr *sp;
1831 register u_int32_t *tl;
1832 register caddr_t cp;
1833 register int32_t t1, t2;
1834 register int len;
1835 struct nfsnode *np = (struct nfsnode *)0;
1836 struct vnode *newvp = (struct vnode *)0;
1837 caddr_t bpos, dpos, cp2;
1838 int error = 0, wccflag = NFSV3_WCCRATTR;
1839 int gotvp = 0;
1840 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1841 struct vattr vattr;
1842 int v3 = NFS_ISV3(dvp);
1843
1844 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) != 0) {
1845 VOP_ABORTOP(dvp, cnp);
1846 return (error);
1847 }
1848 len = cnp->cn_namelen;
1849 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1850 nfsm_reqhead(dvp, NFSPROC_MKDIR,
1851 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1852 nfsm_fhtom(dvp, v3);
1853 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1854 if (v3) {
1855 nfsm_v3attrbuild(vap, FALSE);
1856 } else {
1857 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1858 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1859 sp->sa_uid = nfs_xdrneg1;
1860 sp->sa_gid = nfs_xdrneg1;
1861 sp->sa_size = nfs_xdrneg1;
1862 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1863 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1864 }
1865 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
1866 if (!error)
1867 nfsm_mtofh(dvp, newvp, v3, gotvp);
1868 if (v3)
1869 nfsm_wcc_data(dvp, wccflag);
1870 nfsm_reqdone;
1871 VTONFS(dvp)->n_flag |= NMODIFIED;
1872 if (!wccflag)
1873 VTONFS(dvp)->n_attrstamp = 0;
1874 /*
1875 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry,
1876 * provided we succeed in looking up the directory afterwards.
1877 */
1878 if (error == EEXIST || (!error && !gotvp)) {
1879 if (newvp) {
1880 vrele(newvp);
1881 newvp = (struct vnode *)0;
1882 }
1883 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1884 cnp->cn_proc, &np);
1885 if (!error) {
1886 newvp = NFSTOV(np);
1887 if (newvp->v_type != VDIR)
1888 error = EEXIST;
1889 }
1890 }
1891 if (error) {
1892 if (newvp)
1893 vrele(newvp);
1894 } else
1895 *ap->a_vpp = newvp;
1896 if (error || (cnp->cn_flags & SAVESTART) == 0)
1897 zfree(namei_zone, cnp->cn_pnbuf);
1898 return (error);
1899}
1900
1901/*
1902 * nfs remove directory call
1903 */
1904static int
1905nfs_rmdir(ap)
1906 struct vop_rmdir_args /* {
1907 struct vnode *a_dvp;
1908 struct vnode *a_vp;
1909 struct componentname *a_cnp;
1910 } */ *ap;
1911{
1912 register struct vnode *vp = ap->a_vp;
1913 register struct vnode *dvp = ap->a_dvp;
1914 register struct componentname *cnp = ap->a_cnp;
1915 register u_int32_t *tl;
1916 register caddr_t cp;
1917 register int32_t t1, t2;
1918 caddr_t bpos, dpos, cp2;
1919 int error = 0, wccflag = NFSV3_WCCRATTR;
1920 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1921 int v3 = NFS_ISV3(dvp);
1922
1923 if (dvp == vp)
1924 return (EINVAL);
1925 nfsstats.rpccnt[NFSPROC_RMDIR]++;
1926 nfsm_reqhead(dvp, NFSPROC_RMDIR,
1927 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1928 nfsm_fhtom(dvp, v3);
1929 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1930 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
1931 if (v3)
1932 nfsm_wcc_data(dvp, wccflag);
1933 nfsm_reqdone;
1934 zfree(namei_zone, cnp->cn_pnbuf);
1935 VTONFS(dvp)->n_flag |= NMODIFIED;
1936 if (!wccflag)
1937 VTONFS(dvp)->n_attrstamp = 0;
1938 cache_purge(dvp);
1939 cache_purge(vp);
1940 /*
1941 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1942 */
1943 if (error == ENOENT)
1944 error = 0;
1945 return (error);
1946}
1947
1948/*
1949 * nfs readdir call
1950 */
1951static int
1952nfs_readdir(ap)
1953 struct vop_readdir_args /* {
1954 struct vnode *a_vp;
1955 struct uio *a_uio;
1956 struct ucred *a_cred;
1957 } */ *ap;
1958{
1959 register struct vnode *vp = ap->a_vp;
1960 register struct nfsnode *np = VTONFS(vp);
1961 register struct uio *uio = ap->a_uio;
1962 int tresid, error;
1963 struct vattr vattr;
1964
1965 if (vp->v_type != VDIR)
1966 return (EPERM);
1967 /*
1968 * First, check for hit on the EOF offset cache
1969 */
1970 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
1971 (np->n_flag & NMODIFIED) == 0) {
1972 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
1973 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
1974 nfsstats.direofcache_hits++;
1975 return (0);
1976 }
1977 } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
1978 np->n_mtime == vattr.va_mtime.tv_sec) {
1979 nfsstats.direofcache_hits++;
1980 return (0);
1981 }
1982 }
1983
1984 /*
1985 * Call nfs_bioread() to do the real work.
1986 */
1987 tresid = uio->uio_resid;
1988 error = nfs_bioread(vp, uio, 0, ap->a_cred);
1989
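 /*
 * If nothing was transferred and there was no error, the directory
 * was already at EOF but the EOF offset cache above did not catch it,
 * so count it as a cache miss.
 */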
1990 if (!error && uio->uio_resid == tresid)
1991 nfsstats.direofcache_misses++;
1992 return (error);
1993}
1994
1995/*
1996 * Readdir rpc call.
1997 * Called from below the buffer cache by nfs_doio().
1998 */
1999int
2000nfs_readdirrpc(vp, uiop, cred)
2001 struct vnode *vp;
2002 register struct uio *uiop;
2003 struct ucred *cred;
2004
2005{
2006 register int len, left;
2007 register struct dirent *dp = NULL;
2008 register u_int32_t *tl;
2009 register caddr_t cp;
2010 register int32_t t1, t2;
2011 register nfsuint64 *cookiep;
2012 caddr_t bpos, dpos, cp2;
2013 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2014 nfsuint64 cookie;
2015 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2016 struct nfsnode *dnp = VTONFS(vp);
2017 u_quad_t fileno;
2018 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2019 int attrflag;
2020 int v3 = NFS_ISV3(vp);
2021
2022#ifndef DIAGNOSTIC
2023 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2024 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2025 panic("nfs readdirrpc bad uio");
2026#endif
2027
2028 /*
2029 * If there is no cookie, assume directory was stale.
2030 */
2031 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2032 if (cookiep)
2033 cookie = *cookiep;
2034 else
2035 return (NFSERR_BAD_COOKIE);
2036 /*
2037 * Loop around doing readdir rpc's of size nm_readdirsize
2038 * truncated to a multiple of DIRBLKSIZ.
2039 * The stopping criterion is EOF or buffer full.
2040 */
2041 while (more_dirs && bigenough) {
2042 nfsstats.rpccnt[NFSPROC_READDIR]++;
2043 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2044 NFSX_READDIR(v3));
2045 nfsm_fhtom(vp, v3);
2046 if (v3) {
2047 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2048 *tl++ = cookie.nfsuquad[0];
2049 *tl++ = cookie.nfsuquad[1];
2050 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2051 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2052 } else {
2053 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2054 *tl++ = cookie.nfsuquad[0];
2055 }
2056 *tl = txdr_unsigned(nmp->nm_readdirsize);
2057 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
2058 if (v3) {
2059 nfsm_postop_attr(vp, attrflag);
2060 if (!error) {
2061 nfsm_dissect(tl, u_int32_t *,
2062 2 * NFSX_UNSIGNED);
2063 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2064 dnp->n_cookieverf.nfsuquad[1] = *tl;
2065 } else {
2066 m_freem(mrep);
2067 goto nfsmout;
2068 }
2069 }
2070 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2071 more_dirs = fxdr_unsigned(int, *tl);
2072
2073 /* loop thru the dir entries, doctoring them to 4bsd form */
2074 while (more_dirs && bigenough) {
2075 if (v3) {
2076 nfsm_dissect(tl, u_int32_t *,
2077 3 * NFSX_UNSIGNED);
2078 fxdr_hyper(tl, &fileno);
2079 len = fxdr_unsigned(int, *(tl + 2));
2080 } else {
2081 nfsm_dissect(tl, u_int32_t *,
2082 2 * NFSX_UNSIGNED);
2083 fileno = fxdr_unsigned(u_quad_t, *tl++);
2084 len = fxdr_unsigned(int, *tl);
2085 }
2086 if (len <= 0 || len > NFS_MAXNAMLEN) {
2087 error = EBADRPC;
2088 m_freem(mrep);
2089 goto nfsmout;
2090 }
2091 tlen = nfsm_rndup(len);
2092 if (tlen == len)
2093 tlen += 4; /* To ensure null termination */
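 /*
 * Entries are packed into DIRBLKSIZ aligned blocks and never cross a
 * block boundary; if this entry will not fit in the current block,
 * the previous entry's d_reclen is grown to pad out the remainder.
 */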
2094 left = DIRBLKSIZ - blksiz;
2095 if ((tlen + DIRHDSIZ) > left) {
2096 dp->d_reclen += left;
2097 uiop->uio_iov->iov_base += left;
2098 uiop->uio_iov->iov_len -= left;
2099 uiop->uio_offset += left;
2100 uiop->uio_resid -= left;
2101 blksiz = 0;
2102 }
2103 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2104 bigenough = 0;
2105 if (bigenough) {
2106 dp = (struct dirent *)uiop->uio_iov->iov_base;
2107 dp->d_fileno = (int)fileno;
2108 dp->d_namlen = len;
2109 dp->d_reclen = tlen + DIRHDSIZ;
2110 dp->d_type = DT_UNKNOWN;
2111 blksiz += dp->d_reclen;
2112 if (blksiz == DIRBLKSIZ)
2113 blksiz = 0;
2114 uiop->uio_offset += DIRHDSIZ;
2115 uiop->uio_resid -= DIRHDSIZ;
2116 uiop->uio_iov->iov_base += DIRHDSIZ;
2117 uiop->uio_iov->iov_len -= DIRHDSIZ;
2118 nfsm_mtouio(uiop, len);
2119 cp = uiop->uio_iov->iov_base;
2120 tlen -= len;
2121 *cp = '\0'; /* null terminate */
2122 uiop->uio_iov->iov_base += tlen;
2123 uiop->uio_iov->iov_len -= tlen;
2124 uiop->uio_offset += tlen;
2125 uiop->uio_resid -= tlen;
2126 } else
2127 nfsm_adv(nfsm_rndup(len));
2128 if (v3) {
2129 nfsm_dissect(tl, u_int32_t *,
2130 3 * NFSX_UNSIGNED);
2131 } else {
2132 nfsm_dissect(tl, u_int32_t *,
2133 2 * NFSX_UNSIGNED);
2134 }
2135 if (bigenough) {
2136 cookie.nfsuquad[0] = *tl++;
2137 if (v3)
2138 cookie.nfsuquad[1] = *tl++;
2139 } else if (v3)
2140 tl += 2;
2141 else
2142 tl++;
2143 more_dirs = fxdr_unsigned(int, *tl);
2144 }
2145 /*
2146 * If at end of rpc data, get the eof boolean
2147 */
2148 if (!more_dirs) {
2149 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2150 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2151 }
2152 m_freem(mrep);
2153 }
2154 /*
2155 * Fill last record, if any, out to a multiple of DIRBLKSIZ
2156 * by increasing d_reclen for the last record.
2157 */
2158 if (blksiz > 0) {
2159 left = DIRBLKSIZ - blksiz;
2160 dp->d_reclen += left;
2161 uiop->uio_iov->iov_base += left;
2162 uiop->uio_iov->iov_len -= left;
2163 uiop->uio_offset += left;
2164 uiop->uio_resid -= left;
2165 }
2166
2167 /*
2168 * We are now either at the end of the directory or have filled the
2169 * block.
2170 */
2171 if (bigenough)
2172 dnp->n_direofoffset = uiop->uio_offset;
2173 else {
2174 if (uiop->uio_resid > 0)
2175 printf("EEK! readdirrpc resid > 0\n");
2176 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2177 *cookiep = cookie;
2178 }
2179nfsmout:
2180 return (error);
2181}
2182
2183/*
2184 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2185 */
2186int
2187nfs_readdirplusrpc(vp, uiop, cred)
2188 struct vnode *vp;
2189 register struct uio *uiop;
2190 struct ucred *cred;
2191{
2192 register int len, left;
2193 register struct dirent *dp;
2194 register u_int32_t *tl;
2195 register caddr_t cp;
2196 register int32_t t1, t2;
2197 register struct vnode *newvp;
2198 register nfsuint64 *cookiep;
2199 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2200 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2201 struct nameidata nami, *ndp = &nami;
2202 struct componentname *cnp = &ndp->ni_cnd;
2203 nfsuint64 cookie;
2204 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2205 struct nfsnode *dnp = VTONFS(vp), *np;
2206 nfsfh_t *fhp;
2207 u_quad_t fileno;
2208 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2209 int attrflag, fhsize;
2210
2211#ifndef nolint
2212 dp = (struct dirent *)0;
2213#endif
2214#ifndef DIAGNOSTIC
2215 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2216 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2217 panic("nfs readdirplusrpc bad uio");
2218#endif
2219 ndp->ni_dvp = vp;
2220 newvp = NULLVP;
2221
2222 /*
2223 * If there is no cookie, assume directory was stale.
2224 */
2225 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2226 if (cookiep)
2227 cookie = *cookiep;
2228 else
2229 return (NFSERR_BAD_COOKIE);
2230 /*
2231 * Loop around doing readdir rpc's of size nm_readdirsize
2232 * truncated to a multiple of DIRBLKSIZ.
2233 * The stopping criterion is EOF or buffer full.
2234 */
2235 while (more_dirs && bigenough) {
2236 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2237 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2238 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2239 nfsm_fhtom(vp, 1);
2240 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2241 *tl++ = cookie.nfsuquad[0];
2242 *tl++ = cookie.nfsuquad[1];
2243 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2244 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2245 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2246 *tl = txdr_unsigned(nmp->nm_rsize);
2247 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
2248 nfsm_postop_attr(vp, attrflag);
2249 if (error) {
2250 m_freem(mrep);
2251 goto nfsmout;
2252 }
2253 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2254 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2255 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2256 more_dirs = fxdr_unsigned(int, *tl);
2257
2258 /* loop thru the dir entries, doctoring them to 4bsd form */
2259 while (more_dirs && bigenough) {
2260 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2261 fxdr_hyper(tl, &fileno);
2262 len = fxdr_unsigned(int, *(tl + 2));
2263 if (len <= 0 || len > NFS_MAXNAMLEN) {
2264 error = EBADRPC;
2265 m_freem(mrep);
2266 goto nfsmout;
2267 }
2268 tlen = nfsm_rndup(len);
2269 if (tlen == len)
2270 tlen += 4; /* To ensure null termination*/
2271 left = DIRBLKSIZ - blksiz;
2272 if ((tlen + DIRHDSIZ) > left) {
2273 dp->d_reclen += left;
2274 uiop->uio_iov->iov_base += left;
2275 uiop->uio_iov->iov_len -= left;
2276 uiop->uio_offset += left;
2277 uiop->uio_resid -= left;
2278 blksiz = 0;
2279 }
2280 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2281 bigenough = 0;
2282 if (bigenough) {
2283 dp = (struct dirent *)uiop->uio_iov->iov_base;
2284 dp->d_fileno = (int)fileno;
2285 dp->d_namlen = len;
2286 dp->d_reclen = tlen + DIRHDSIZ;
2287 dp->d_type = DT_UNKNOWN;
2288 blksiz += dp->d_reclen;
2289 if (blksiz == DIRBLKSIZ)
2290 blksiz = 0;
2291 uiop->uio_offset += DIRHDSIZ;
2292 uiop->uio_resid -= DIRHDSIZ;
2293 uiop->uio_iov->iov_base += DIRHDSIZ;
2294 uiop->uio_iov->iov_len -= DIRHDSIZ;
2295 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2296 cnp->cn_namelen = len;
2297 nfsm_mtouio(uiop, len);
2298 cp = uiop->uio_iov->iov_base;
2299 tlen -= len;
2300 *cp = '\0';
2301 uiop->uio_iov->iov_base += tlen;
2302 uiop->uio_iov->iov_len -= tlen;
2303 uiop->uio_offset += tlen;
2304 uiop->uio_resid -= tlen;
2305 } else
2306 nfsm_adv(nfsm_rndup(len));
2307 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2308 if (bigenough) {
2309 cookie.nfsuquad[0] = *tl++;
2310 cookie.nfsuquad[1] = *tl++;
2311 } else
2312 tl += 2;
2313
2314 /*
2315 * Since the attributes are before the file handle
2316 * (sigh), we must skip over the attributes and then
2317 * come back and get them.
2318 */
2319 attrflag = fxdr_unsigned(int, *tl);
2320 if (attrflag) {
2321 dpossav1 = dpos;
2322 mdsav1 = md;
2323 nfsm_adv(NFSX_V3FATTR);
2324 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2325 doit = fxdr_unsigned(int, *tl);
2326 if (doit) {
2327 nfsm_getfh(fhp, fhsize, 1);
2328 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2329 VREF(vp);
2330 newvp = vp;
2331 np = dnp;
2332 } else {
2333 error = nfs_nget(vp->v_mount, fhp,
2334 fhsize, &np);
2335 if (error)
2336 doit = 0;
2337 else
2338 newvp = NFSTOV(np);
2339 }
2340 }
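 /*
 * If both a file handle and attributes were returned, load the
 * attributes into the new nfsnode, record the file type in the
 * dirent and prime the name cache with the entry so that a later
 * lookup can be satisfied without another rpc.
 */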
2341 if (doit) {
2342 dpossav2 = dpos;
2343 dpos = dpossav1;
2344 mdsav2 = md;
2345 md = mdsav1;
2346 nfsm_loadattr(newvp, (struct vattr *)0);
2347 dpos = dpossav2;
2348 md = mdsav2;
2349 dp->d_type =
2350 IFTODT(VTTOIF(np->n_vattr.va_type));
2351 ndp->ni_vp = newvp;
2352 cnp->cn_hash = 0;
2353 for (cp = cnp->cn_nameptr, i = 1; i <= len;
2354 i++, cp++)
2355 cnp->cn_hash += (unsigned char)*cp * i;
2356 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2357 }
2358 } else {
2359 /* Just skip over the file handle */
2360 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2361 i = fxdr_unsigned(int, *tl);
2362 nfsm_adv(nfsm_rndup(i));
2363 }
2364 if (newvp != NULLVP) {
2365 vrele(newvp);
2366 newvp = NULLVP;
2367 }
2368 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2369 more_dirs = fxdr_unsigned(int, *tl);
2370 }
2371 /*
2372 * If at end of rpc data, get the eof boolean
2373 */
2374 if (!more_dirs) {
2375 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2376 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2377 }
2378 m_freem(mrep);
2379 }
2380 /*
2381 * Fill last record, if any, out to a multiple of DIRBLKSIZ
2382 * by increasing d_reclen for the last record.
2383 */
2384 if (blksiz > 0) {
2385 left = DIRBLKSIZ - blksiz;
2386 dp->d_reclen += left;
2387 uiop->uio_iov->iov_base += left;
2388 uiop->uio_iov->iov_len -= left;
2389 uiop->uio_offset += left;
2390 uiop->uio_resid -= left;
2391 }
2392
2393 /*
2394 * We are now either at the end of the directory or have filled the
2395 * block.
2396 */
2397 if (bigenough)
2398 dnp->n_direofoffset = uiop->uio_offset;
2399 else {
2400 if (uiop->uio_resid > 0)
2401 printf("EEK! readdirplusrpc resid > 0\n");
2402 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2403 *cookiep = cookie;
2404 }
2405nfsmout:
2406 if (newvp != NULLVP) {
2407 if (newvp == vp)
2408 vrele(newvp);
2409 else
2410 vput(newvp);
2411 newvp = NULLVP;
2412 }
2413 return (error);
2414}
2415
2416/*
2417 * Silly rename. To make the stateless NFS filesystem look a little
2418 * more like "ufs", a remove of an active vnode is translated to a rename
2419 * to a funny looking filename that is removed by nfs_inactive on the
2420 * nfsnode. There is the potential for another process on a different client
2421 * to create the same funny name between the time nfs_lookitup() fails and
2422 * nfs_rename() completes, but...
2423 */
2424static int
2425nfs_sillyrename(dvp, vp, cnp)
2426 struct vnode *dvp, *vp;
2427 struct componentname *cnp;
2428{
2429 register struct sillyrename *sp;
2430 struct nfsnode *np;
2431 int error;
2432 short pid;
2433
2434 cache_purge(dvp);
2435 np = VTONFS(vp);
2436#ifndef DIAGNOSTIC
2437 if (vp->v_type == VDIR)
2438 panic("nfs: sillyrename dir");
2439#endif
2440 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2441 M_NFSREQ, M_WAITOK);
2442 sp->s_cred = crdup(cnp->cn_cred);
2443 sp->s_dvp = dvp;
2444 VREF(dvp);
2445
2446 /* Fudge together a funny name */
2447 pid = cnp->cn_proc->p_pid;
2448 sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);
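 /*
 * The name is ".nfsA" followed by the pid in hex; the 'A' at
 * s_name[4] is bumped in the loop below until a name that is not
 * already present in the directory is found.
 */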
2449
2450 /* Try lookitups until we get one that isn't there */
2451 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2452 cnp->cn_proc, (struct nfsnode **)0) == 0) {
2453 sp->s_name[4]++;
2454 if (sp->s_name[4] > 'z') {
2455 error = EINVAL;
2456 goto bad;
2457 }
2458 }
2459 error = nfs_renameit(dvp, cnp, sp);
2460 if (error)
2461 goto bad;
2462 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2463 cnp->cn_proc, &np);
2464 np->n_sillyrename = sp;
2465 return (0);
2466bad:
2467 vrele(sp->s_dvp);
2468 crfree(sp->s_cred);
2469 free((caddr_t)sp, M_NFSREQ);
2470 return (error);
2471}
2472
2473/*
2474 * Look up a file name and optionally either update the file handle or
2475 * allocate an nfsnode, depending on the value of npp.
2476 * npp == NULL --> just do the lookup
2477 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2478 * handled too
2479 * *npp != NULL --> update the file handle in the vnode
2480 */
2481static int
2482nfs_lookitup(dvp, name, len, cred, procp, npp)
2483 register struct vnode *dvp;
2484 const char *name;
2485 int len;
2486 struct ucred *cred;
2487 struct proc *procp;
2488 struct nfsnode **npp;
2489{
2490 register u_int32_t *tl;
2491 register caddr_t cp;
2492 register int32_t t1, t2;
2493 struct vnode *newvp = (struct vnode *)0;
2494 struct nfsnode *np, *dnp = VTONFS(dvp);
2495 caddr_t bpos, dpos, cp2;
2496 int error = 0, fhlen, attrflag;
2497 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2498 nfsfh_t *nfhp;
2499 int v3 = NFS_ISV3(dvp);
2500
2501 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2502 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2503 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2504 nfsm_fhtom(dvp, v3);
2505 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2506 nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred);
2507 if (npp && !error) {
2508 nfsm_getfh(nfhp, fhlen, v3);
2509 if (*npp) {
2510 np = *npp;
2511 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2512 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2513 np->n_fhp = &np->n_fh;
2514 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2515 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2516 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2517 np->n_fhsize = fhlen;
2518 newvp = NFSTOV(np);
2519 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2520 VREF(dvp);
2521 newvp = dvp;
2522 } else {
2523 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2524 if (error) {
2525 m_freem(mrep);
2526 return (error);
2527 }
2528 newvp = NFSTOV(np);
2529 }
2530 if (v3) {
2531 nfsm_postop_attr(newvp, attrflag);
2532 if (!attrflag && *npp == NULL) {
2533 m_freem(mrep);
2534 if (newvp == dvp)
2535 vrele(newvp);
2536 else
2537 vput(newvp);
2538 return (ENOENT);
2539 }
2540 } else
2541 nfsm_loadattr(newvp, (struct vattr *)0);
2542 }
2543 nfsm_reqdone;
2544 if (npp && *npp == NULL) {
2545 if (error) {
2546 if (newvp)
2547 if (newvp == dvp)
2548 vrele(newvp);
2549 else
2550 vput(newvp);
2551 } else
2552 *npp = np;
2553 }
2554 return (error);
2555}
2556
2557/*
2558 * Nfs Version 3 commit rpc
2559 */
2560static int
2561nfs_commit(vp, offset, cnt, cred, procp)
2562 register struct vnode *vp;
2563 u_quad_t offset;
2564 int cnt;
2565 struct ucred *cred;
2566 struct proc *procp;
2567{
2568 register caddr_t cp;
2569 register u_int32_t *tl;
2570 register int32_t t1, t2;
2571 register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2572 caddr_t bpos, dpos, cp2;
2573 int error = 0, wccflag = NFSV3_WCCRATTR;
2574 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2575
2576 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2577 return (0);
2578 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2579 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2580 nfsm_fhtom(vp, 1);
2581 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2582 txdr_hyper(&offset, tl);
2583 tl += 2;
2584 *tl = txdr_unsigned(cnt);
2585 nfsm_request(vp, NFSPROC_COMMIT, procp, cred);
2586 nfsm_wcc_data(vp, wccflag);
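 /*
 * A write verifier that differs from the one cached in the mount
 * point means the server has rebooted and may have lost uncommitted
 * data; return NFSERR_STALEWRITEVERF so the caller re-writes the
 * dirty buffers.
 */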
2587 if (!error) {
2588 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2589 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2590 NFSX_V3WRITEVERF)) {
2591 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2592 NFSX_V3WRITEVERF);
2593 error = NFSERR_STALEWRITEVERF;
2594 }
2595 }
2596 nfsm_reqdone;
2597 return (error);
2598}
2599
2600/*
2601 * Kludge City..
2602 * - make nfs_bmap() essentially a no-op that does no translation
2603 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2604 * (Maybe I could use the process's page mapping, but I was concerned that
2605 * Kernel Write might not be enabled and also figured copyout() would do
2606 * a lot more work than bcopy() and also it currently happens in the
2607 * context of the swapper process (2).)
2608 */
2609static int
2610nfs_bmap(ap)
2611 struct vop_bmap_args /* {
2612 struct vnode *a_vp;
2613 daddr_t a_bn;
2614 struct vnode **a_vpp;
2615 daddr_t *a_bnp;
2616 int *a_runp;
2617 int *a_runb;
2618 } */ *ap;
2619{
2620 register struct vnode *vp = ap->a_vp;
2621
2622 if (ap->a_vpp != NULL)
2623 *ap->a_vpp = vp;
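 /*
 * No real translation is done; the logical block number is simply
 * scaled to DEV_BSIZE units using the mount point's i/o size,
 * presumably so nfs_doio() can reconstruct the byte offset from
 * b_blkno.
 */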
2624 if (ap->a_bnp != NULL)
2625 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2626 if (ap->a_runp != NULL)
2627 *ap->a_runp = 0;
2628 if (ap->a_runb != NULL)
2629 *ap->a_runb = 0;
2630 return (0);
2631}
2632
2633/*
2634 * Strategy routine.
2635 * For async requests when nfsiod(s) are running, queue the request by
2636 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2637 * request.
2638 */
2639static int
2640nfs_strategy(ap)
2641 struct vop_strategy_args *ap;
2642{
2643 register struct buf *bp = ap->a_bp;
2644 struct ucred *cr;
2645 struct proc *p;
2646 int error = 0;
2647
2648 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2649 KASSERT((bp->b_flags & B_BUSY), ("nfs_strategy: buffer %p not B_BUSY", bp));
2650
2651 if (bp->b_flags & B_PHYS)
2652 panic("nfs physio");
2653
2654 if (bp->b_flags & B_ASYNC)
2655 p = (struct proc *)0;
2656 else
2657 p = curproc; /* XXX */
2658
2659 if (bp->b_flags & B_READ)
2660 cr = bp->b_rcred;
2661 else
2662 cr = bp->b_wcred;
2663
2664 /*
2665 * If the op is asynchronous and an i/o daemon is waiting,
2666 * queue the request, wake it up and wait for completion;
2667 * otherwise just do it ourselves.
2668 */
2669 if ((bp->b_flags & B_ASYNC) == 0 ||
2670 nfs_asyncio(bp, NOCRED))
2671 error = nfs_doio(bp, cr, p);
2672 return (error);
2673}
2674
2675/*
2676 * Mmap a file
2677 *
2678 * NB Currently unsupported.
2679 */
2680/* ARGSUSED */
2681static int
2682nfs_mmap(ap)
2683 struct vop_mmap_args /* {
2684 struct vnode *a_vp;
2685 int a_fflags;
2686 struct ucred *a_cred;
2687 struct proc *a_p;
2688 } */ *ap;
2689{
2690
2691 return (EINVAL);
2692}
2693
2694/*
2695 * fsync vnode op. Just call nfs_flush() with commit == 1.
2696 */
2697/* ARGSUSED */
2698static int
2699nfs_fsync(ap)
2700 struct vop_fsync_args /* {
2701 struct vnodeop_desc *a_desc;
2702 struct vnode * a_vp;
2703 struct ucred * a_cred;
2704 int a_waitfor;
2705 struct proc * a_p;
2706 } */ *ap;
2707{
2708
2709 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2710}
2711
2712/*
2713 * Flush all the blocks associated with a vnode.
2714 * Walk through the buffer pool and push any dirty pages
2715 * associated with the vnode.
2716 */
2717static int
2718nfs_flush(vp, cred, waitfor, p, commit)
2719 register struct vnode *vp;
2720 struct ucred *cred;
2721 int waitfor;
2722 struct proc *p;
2723 int commit;
2724{
2725 register struct nfsnode *np = VTONFS(vp);
2726 register struct buf *bp;
2727 register int i;
2728 struct buf *nbp;
2729 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2730 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2731 int passone = 1;
2732 u_quad_t off, endoff, toff;
2733 struct ucred* wcred = NULL;
2734 struct buf **bvec = NULL;
2735#ifndef NFS_COMMITBVECSIZ
2736#define NFS_COMMITBVECSIZ 20
2737#endif
2738 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2739 int bvecsize = 0, bveccount;
2740
2741 if (nmp->nm_flag & NFSMNT_INT)
2742 slpflag = PCATCH;
2743 if (!commit)
2744 passone = 0;
2745 /*
2746 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2747 * server, but has not been committed to stable storage on the server
2748 * yet. On the first pass, the byte range is worked out and the commit
2749 * rpc is done. On the second pass, nfs_writebp() is called to do the
2750 * job.
2751 */
2752again:
2753 off = (u_quad_t)-1;
2754 endoff = 0;
2755 bvecpos = 0;
2756 if (NFS_ISV3(vp) && commit) {
2757 s = splbio();
2758 /*
2759 * Count up how many buffers waiting for a commit.
2760 */
2761 bveccount = 0;
2762 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2763 nbp = TAILQ_NEXT(bp, b_vnbufs);
2764 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2765 == (B_DELWRI | B_NEEDCOMMIT))
2766 bveccount++;
2767 }
2768 /*
2769 * Allocate space to remember the list of bufs to commit. It is
2770 * important to use M_NOWAIT here to avoid a race with nfs_write.
2771 * If we can't get memory (for whatever reason), we will end up
2772 * committing the buffers one-by-one in the loop below.
2773 */
2774 if (bveccount > NFS_COMMITBVECSIZ) {
2775 if (bvec != NULL && bvec != bvec_on_stack)
2776 free(bvec, M_TEMP);
2777 bvec = (struct buf **)
2778 malloc(bveccount * sizeof(struct buf *),
2779 M_TEMP, M_NOWAIT);
2780 if (bvec == NULL) {
2781 bvec = bvec_on_stack;
2782 bvecsize = NFS_COMMITBVECSIZ;
2783 } else
2784 bvecsize = bveccount;
2785 } else {
2786 bvec = bvec_on_stack;
2787 bvecsize = NFS_COMMITBVECSIZ;
2788 }
2789 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2790 nbp = TAILQ_NEXT(bp, b_vnbufs);
2791 if (bvecpos >= bvecsize)
2792 break;
2793 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2794 != (B_DELWRI | B_NEEDCOMMIT))
2795 continue;
2796 bremfree(bp);
2797 /*
2798 * Work out if all buffers are using the same cred
2799 * so we can deal with them all with one commit.
2800 *
2801 * NOTE: we are not clearing B_DONE here, so we have
2802 * to do it later on in this routine if we intend to
2803 * initiate I/O on the bp.
2804 */
2805 if (wcred == NULL)
2806 wcred = bp->b_wcred;
2807 else if (wcred != bp->b_wcred)
2808 wcred = NOCRED;
2809 bp->b_flags |= (B_BUSY | B_WRITEINPROG);
2810 vfs_busy_pages(bp, 1);
2811
2812 /*
2813 * bp is protected by being B_BUSY, but nbp is not
2814 * and vfs_busy_pages() may sleep. We have to
2815 * recalculate nbp.
2816 */
2817 nbp = TAILQ_NEXT(bp, b_vnbufs);
2818
2819 /*
2820 * A list of these buffers is kept so that the
2821 * second loop knows which buffers have actually
2822 * been committed. This is necessary, since there
2823 * may be a race between the commit rpc and new
2824 * uncommitted writes on the file.
2825 */
2826 bvec[bvecpos++] = bp;
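 /*
 * Track the lowest and highest dirty byte offsets over all of the
 * buffers so that, when they share a cred, a single commit rpc
 * below can cover the whole range.
 */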
2827 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2828 bp->b_dirtyoff;
2829 if (toff < off)
2830 off = toff;
2831 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2832 if (toff > endoff)
2833 endoff = toff;
2834 }
2835 splx(s);
2836 }
2837 if (bvecpos > 0) {
2838 /*
2839 * Commit data on the server, as required.
2840 * If all bufs are using the same wcred, then use that with
2841 * one call for all of them, otherwise commit each one
2842 * separately.
2843 */
2844 if (wcred != NOCRED)
2845 retv = nfs_commit(vp, off, (int)(endoff - off),
2846 wcred, p);
2847 else {
2848 retv = 0;
2849 for (i = 0; i < bvecpos; i++) {
2850 off_t off, size;
2851 bp = bvec[i];
2852 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2853 bp->b_dirtyoff;
2854 size = (u_quad_t)(bp->b_dirtyend
2855 - bp->b_dirtyoff);
2856 retv = nfs_commit(vp, off, (int)size,
2857 bp->b_wcred, p);
2858 if (retv) break;
2859 }
2860 }
2861
2862 if (retv == NFSERR_STALEWRITEVERF)
2863 nfs_clearcommit(vp->v_mount);
2864
2865 /*
2866 * Now, either mark the blocks I/O done or mark the
2867 * blocks dirty, depending on whether the commit
2868 * succeeded.
2869 */
2870 for (i = 0; i < bvecpos; i++) {
2871 bp = bvec[i];
2872 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG);
2873 if (retv) {
2874 /*
2875 * Error, leave B_DELWRI intact
2876 */
2877 vfs_unbusy_pages(bp);
2878 brelse(bp);
2879 } else {
2880 /*
2881 * Success, remove B_DELWRI ( bundirty() ).
2882 *
2883 * b_dirtyoff/b_dirtyend seem to be NFS
2884 * specific. We should probably move that
2885 * into bundirty(). XXX
2886 */
2887 s = splbio();
2888 vp->v_numoutput++;
2889 bp->b_flags |= B_ASYNC;
2890 bundirty(bp);
2891 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2892 bp->b_dirtyoff = bp->b_dirtyend = 0;
2893 splx(s);
2894 biodone(bp);
2895 }
2896 }
2897 }
2898
2899 /*
2900 * Start/do any write(s) that are required.
2901 */
2902loop:
2903 s = splbio();
2904 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2905 nbp = TAILQ_NEXT(bp, b_vnbufs);
2906 if (bp->b_flags & B_BUSY) {
2907 if (waitfor != MNT_WAIT || passone)
2908 continue;
2909 bp->b_flags |= B_WANTED;
2910 error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
2911 "nfsfsync", slptimeo);
2912 splx(s);
2913 if (error) {
2914 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
2915 error = EINTR;
2916 goto done;
2917 }
2918 if (slpflag == PCATCH) {
2919 slpflag = 0;
2920 slptimeo = 2 * hz;
2921 }
2922 }
2923 goto loop;
2924 }
2925 if ((bp->b_flags & B_DELWRI) == 0)
2926 panic("nfs_fsync: not dirty");
2927 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT))
2928 continue;
2929 bremfree(bp);
2930 if (passone || !commit)
2931 bp->b_flags |= (B_BUSY|B_ASYNC);
2932 else
2933 bp->b_flags |= (B_BUSY|B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT);
2934 splx(s);
2935 VOP_BWRITE(bp);
2936 goto loop;
2937 }
2938 splx(s);
2939 if (passone) {
2940 passone = 0;
2941 goto again;
2942 }
2943 if (waitfor == MNT_WAIT) {
2944 while (vp->v_numoutput) {
2945 vp->v_flag |= VBWAIT;
2946 error = tsleep((caddr_t)&vp->v_numoutput,
2947 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
2948 if (error) {
2949 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
2950 error = EINTR;
2951 goto done;
2952 }
2953 if (slpflag == PCATCH) {
2954 slpflag = 0;
2955 slptimeo = 2 * hz;
2956 }
2957 }
2958 }
2959 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
2960 goto loop;
2961 }
2962 }
2963 if (np->n_flag & NWRITEERR) {
2964 error = np->n_error;
2965 np->n_flag &= ~NWRITEERR;
2966 }
2967done:
2968 if (bvec != NULL && bvec != bvec_on_stack)
2969 free(bvec, M_TEMP);
2970 return (error);
2971}
2972
2973/*
2974 * NFS advisory byte-level locks.
2975 * Currently unsupported.
2976 */
2977static int
2978nfs_advlock(ap)
2979 struct vop_advlock_args /* {
2980 struct vnode *a_vp;
2981 caddr_t a_id;
2982 int a_op;
2983 struct flock *a_fl;
2984 int a_flags;
2985 } */ *ap;
2986{
2987 register struct nfsnode *np = VTONFS(ap->a_vp);
2988
2989 /*
2990 * The following kludge is to allow diskless support to work
2991 * until a real NFS lockd is implemented. Basically, just pretend
2992 * that this is a local lock.
2993 */
2994 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
2995}
2996
2997/*
2998 * Print out the contents of an nfsnode.
2999 */
3000static int
3001nfs_print(ap)
3002 struct vop_print_args /* {
3003 struct vnode *a_vp;
3004 } */ *ap;
3005{
3006 register struct vnode *vp = ap->a_vp;
3007 register struct nfsnode *np = VTONFS(vp);
3008
3009 printf("tag VT_NFS, fileid %ld fsid 0x%lx",
3010 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3011 if (vp->v_type == VFIFO)
3012 fifo_printinfo(vp);
3013 printf("\n");
3014 return (0);
3015}
3016
3017/*
3018 * Just call nfs_writebp() with the force argument set to 1.
3019 *
3020 * NOTE: B_DONE may or may not be set in a_bp on call.
3021 */
3022static int
3023nfs_bwrite(ap)
3024 struct vop_bwrite_args /* {
3025 struct vnode *a_bp;
3026 } */ *ap;
3027{
3028 return (nfs_writebp(ap->a_bp, 1));
3029}
3030
3031/*
3032 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
3033 * the force flag is one and it also handles the B_NEEDCOMMIT flag. We set
3034 * B_CACHE if this is a VMIO buffer.
3035 */
3036int
3037nfs_writebp(bp, force)
3038 register struct buf *bp;
3039 int force;
3040{
3041 int s;
3042 int oldflags = bp->b_flags;
3043 int retv = 1;
3044 off_t off;
3045
3046 if(!(bp->b_flags & B_BUSY))
3047 panic("bwrite: buffer is not busy???");
3048
3049 if (bp->b_flags & B_INVAL) {
3050 brelse(bp);
3051 return(0);
3052 }
3053
3054 bp->b_flags |= B_CACHE;
3055
3056 /*
3057 * Undirty the bp. We will redirty it later if the I/O fails.
3058 */
3059
3060 s = splbio();
3061 bundirty(bp);
3062 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3063
3064 bp->b_vp->v_numoutput++;
3065 curproc->p_stats->p_ru.ru_oublock++;
3066 splx(s);
3067
3068 /*
3069 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not
3070 * an actual write will have to be scheduled via. VOP_STRATEGY().
3071 * If B_WRITEINPROG is already set, then push it with a write anyhow.
3072 */
3073 vfs_busy_pages(bp, 1);
3074 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) {
3075 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
3076 bp->b_flags |= B_WRITEINPROG;
3077 retv = nfs_commit(bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
3078 bp->b_wcred, bp->b_proc);
3079 bp->b_flags &= ~B_WRITEINPROG;
3080 if (!retv) {
3081 bp->b_dirtyoff = bp->b_dirtyend = 0;
3082 bp->b_flags &= ~B_NEEDCOMMIT;
3083 biodone(bp);
3084 } else if (retv == NFSERR_STALEWRITEVERF) {
3085 nfs_clearcommit(bp->b_vp->v_mount);
3086 }
3087 }
3088 if (retv) {
3089 if (force)
3090 bp->b_flags |= B_WRITEINPROG;
3091 VOP_STRATEGY(bp->b_vp, bp);
3092 }
3093
3094 if( (oldflags & B_ASYNC) == 0) {
3095 int rtval = biowait(bp);
3096
3097 if (oldflags & B_DELWRI) {
3098 s = splbio();
3099 reassignbuf(bp, bp->b_vp);
3100 splx(s);
3101 }
3102
3103 brelse(bp);
3104 return (rtval);
3105 }
3106
3107 return (0);
3108}
3109
3110/*
3111 * nfs special file access vnode op.
3112 * Essentially just get vattr and then imitate iaccess() since the device is
3113 * local to the client.
3114 */
3115static int
3116nfsspec_access(ap)
3117 struct vop_access_args /* {
3118 struct vnode *a_vp;
3119 int a_mode;
3120 struct ucred *a_cred;
3121 struct proc *a_p;
3122 } */ *ap;
3123{
3124 register struct vattr *vap;
3125 register gid_t *gp;
3126 register struct ucred *cred = ap->a_cred;
3127 struct vnode *vp = ap->a_vp;
3128 mode_t mode = ap->a_mode;
3129 struct vattr vattr;
3130 register int i;
3131 int error;
3132
3133 /*
3134 * Disallow write attempts on filesystems mounted read-only;
3135 * unless the file is a socket, fifo, or a block or character
3136 * device resident on the filesystem.
3137 */
3138 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3139 switch (vp->v_type) {
3140 case VREG:
3141 case VDIR:
3142 case VLNK:
3143 return (EROFS);
3144 default:
3145 break;
3146 }
3147 }
3148 /*
3149 * If you're the super-user,
3150 * you always get access.
3151 */
3152 if (cred->cr_uid == 0)
3153 return (0);
3154 vap = &vattr;
3155 error = VOP_GETATTR(vp, vap, cred, ap->a_p);
3156 if (error)
3157 return (error);
3158 /*
3159 * Access check is based on only one of owner, group, public.
3160 * If not owner, then check group. If not a member of the
3161 * group, then check public access.
3162 */
3163 if (cred->cr_uid != vap->va_uid) {
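 /*
 * Not the owner: shift to the group permission bits and, if the
 * caller is not a member of the file's group, shift again to the
 * "other" bits before the final check below.
 */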
3164 mode >>= 3;
3165 gp = cred->cr_groups;
3166 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3167 if (vap->va_gid == *gp)
3168 goto found;
3169 mode >>= 3;
3170found:
3171 ;
3172 }
3173 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3174 return (error);
3175}
3176
3177/*
3178 * Read wrapper for special devices.
3179 */
3180static int
3181nfsspec_read(ap)
3182 struct vop_read_args /* {
3183 struct vnode *a_vp;
3184 struct uio *a_uio;
3185 int a_ioflag;
3186 struct ucred *a_cred;
3187 } */ *ap;
3188{
3189 register struct nfsnode *np = VTONFS(ap->a_vp);
3190
3191 /*
3192 * Set access flag.
3193 */
3194 np->n_flag |= NACC;
3195 getnanotime(&np->n_atim);
3196 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3197}
3198
3199/*
3200 * Write wrapper for special devices.
3201 */
3202static int
3203nfsspec_write(ap)
3204 struct vop_write_args /* {
3205 struct vnode *a_vp;
3206 struct uio *a_uio;
3207 int a_ioflag;
3208 struct ucred *a_cred;
3209 } */ *ap;
3210{
3211 register struct nfsnode *np = VTONFS(ap->a_vp);
3212
3213 /*
3214 * Set update flag.
3215 */
3216 np->n_flag |= NUPD;
3217 getnanotime(&np->n_mtim);
3218 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3219}
3220
3221/*
3222 * Close wrapper for special devices.
3223 *
3224 * Update the times on the nfsnode then do device close.
3225 */
3226static int
3227nfsspec_close(ap)
3228 struct vop_close_args /* {
3229 struct vnode *a_vp;
3230 int a_fflag;
3231 struct ucred *a_cred;
3232 struct proc *a_p;
3233 } */ *ap;
3234{
3235 register struct vnode *vp = ap->a_vp;
3236 register struct nfsnode *np = VTONFS(vp);
3237 struct vattr vattr;
3238
3239 if (np->n_flag & (NACC | NUPD)) {
3240 np->n_flag |= NCHG;
3241 if (vp->v_usecount == 1 &&
3242 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3243 VATTR_NULL(&vattr);
3244 if (np->n_flag & NACC)
3245 vattr.va_atime = np->n_atim;
3246 if (np->n_flag & NUPD)
3247 vattr.va_mtime = np->n_mtim;
3248 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3249 }
3250 }
3251 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3252}
3253
3254/*
3255 * Read wrapper for fifos.
3256 */
3257static int
3258nfsfifo_read(ap)
3259 struct vop_read_args /* {
3260 struct vnode *a_vp;
3261 struct uio *a_uio;
3262 int a_ioflag;
3263 struct ucred *a_cred;
3264 } */ *ap;
3265{
3266 register struct nfsnode *np = VTONFS(ap->a_vp);
3267
3268 /*
3269 * Set access flag.
3270 */
3271 np->n_flag |= NACC;
3272 getnanotime(&np->n_atim);
3273 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3274}
3275
3276/*
3277 * Write wrapper for fifos.
3278 */
3279static int
3280nfsfifo_write(ap)
3281 struct vop_write_args /* {
3282 struct vnode *a_vp;
3283 struct uio *a_uio;
3284 int a_ioflag;
3285 struct ucred *a_cred;
3286 } */ *ap;
3287{
3288 register struct nfsnode *np = VTONFS(ap->a_vp);
3289
3290 /*
3291 * Set update flag.
3292 */
3293 np->n_flag |= NUPD;
3294 getnanotime(&np->n_mtim);
3295 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3296}
3297
3298/*
3299 * Close wrapper for fifos.
3300 *
3301 * Update the times on the nfsnode then do fifo close.
3302 */
3303static int
3304nfsfifo_close(ap)
3305 struct vop_close_args /* {
3306 struct vnode *a_vp;
3307 int a_fflag;
3308 struct ucred *a_cred;
3309 struct proc *a_p;
3310 } */ *ap;
3311{
3312 register struct vnode *vp = ap->a_vp;
3313 register struct nfsnode *np = VTONFS(vp);
3314 struct vattr vattr;
3315 struct timespec ts;
3316
3317 if (np->n_flag & (NACC | NUPD)) {
3318 getnanotime(&ts);
3319 if (np->n_flag & NACC)
3320 np->n_atim = ts;
3321 if (np->n_flag & NUPD)
3322 np->n_mtim = ts;
3323 np->n_flag |= NCHG;
3324 if (vp->v_usecount == 1 &&
3325 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3326 VATTR_NULL(&vattr);
3327 if (np->n_flag & NACC)
3328 vattr.va_atime = np->n_atim;
3329 if (np->n_flag & NUPD)
3330 vattr.va_mtime = np->n_mtim;
3331 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3332 }
3333 }
3334 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3335}
2552 } else
2553 *npp = np;
2554 }
2555 return (error);
2556}
2557
2558/*
2559 * Nfs Version 3 commit rpc
2560 */
2561static int
2562nfs_commit(vp, offset, cnt, cred, procp)
2563 register struct vnode *vp;
2564 u_quad_t offset;
2565 int cnt;
2566 struct ucred *cred;
2567 struct proc *procp;
2568{
2569 register caddr_t cp;
2570 register u_int32_t *tl;
2571 register int32_t t1, t2;
2572 register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2573 caddr_t bpos, dpos, cp2;
2574 int error = 0, wccflag = NFSV3_WCCRATTR;
2575 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2576
2577 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2578 return (0);
2579 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2580 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2581 nfsm_fhtom(vp, 1);
2582 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2583 txdr_hyper(&offset, tl);
2584 tl += 2;
2585 *tl = txdr_unsigned(cnt);
2586 nfsm_request(vp, NFSPROC_COMMIT, procp, cred);
2587 nfsm_wcc_data(vp, wccflag);
2588 if (!error) {
2589 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2590 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2591 NFSX_V3WRITEVERF)) {
2592 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2593 NFSX_V3WRITEVERF);
2594 error = NFSERR_STALEWRITEVERF;
2595 }
2596 }
2597 nfsm_reqdone;
2598 return (error);
2599}
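/*
 * Illustrative user-space sketch (not part of nfs_vnops.c) of the write
 * verifier check performed by nfs_commit() above.  NFS Version 3 returns an
 * 8 byte verf cookie with each WRITE and COMMIT reply; if the cookie in a
 * COMMIT reply differs from the one cached when the data was written, the
 * server has rebooted and any uncommitted data must be rewritten.  The
 * names check_writeverf and STALE_VERF are hypothetical stand-ins for the
 * real nm_verf comparison and NFSERR_STALEWRITEVERF.
 */
#include <string.h>

#define VERF_SIZE	8	/* plays the role of NFSX_V3WRITEVERF */
#define STALE_VERF	1	/* plays the role of NFSERR_STALEWRITEVERF */

static int
check_writeverf(unsigned char *cached, const unsigned char *replied)
{
	if (memcmp(cached, replied, VERF_SIZE) != 0) {
		/*
		 * Remember the new verifier and tell the caller that
		 * previously written, uncommitted buffers are stale.
		 */
		memcpy(cached, replied, VERF_SIZE);
		return (STALE_VERF);
	}
	return (0);
}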
2600
2601/*
2602 * Kludge City..
2603 * - make nfs_bmap() essentially a no-op that does no translation
2604 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2605 * (Maybe I could use the process's page mapping, but I was concerned that
2606 * Kernel Write might not be enabled and also figured copyout() would do
2607 * a lot more work than bcopy() and also it currently happens in the
2608 * context of the swapper process (2).
2609 */
2610static int
2611nfs_bmap(ap)
2612 struct vop_bmap_args /* {
2613 struct vnode *a_vp;
2614 daddr_t a_bn;
2615 struct vnode **a_vpp;
2616 daddr_t *a_bnp;
2617 int *a_runp;
2618 int *a_runb;
2619 } */ *ap;
2620{
2621 register struct vnode *vp = ap->a_vp;
2622
2623 if (ap->a_vpp != NULL)
2624 *ap->a_vpp = vp;
2625 if (ap->a_bnp != NULL)
2626 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2627 if (ap->a_runp != NULL)
2628 *ap->a_runp = 0;
2629 if (ap->a_runb != NULL)
2630 *ap->a_runb = 0;
2631 return (0);
2632}
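/*
 * Illustrative sketch (not part of nfs_vnops.c) of the block number
 * translation nfs_bmap() performs above.  Logical block numbers are in
 * units of the mount's I/O size (mnt_stat.f_iosize); the buffer cache wants
 * them in DEV_BSIZE (512 byte) units, which is what the btodb() scaling
 * gives.  logical_to_dev_blkno is a hypothetical name used only here.
 */
static long
logical_to_dev_blkno(long logical_bn, long iosize)
{
	/*
	 * E.g. logical block 3 of an 8192 byte block mount starts at
	 * byte 24576, which is device block 24576 / 512 = 48.
	 */
	return (logical_bn * (iosize / 512));	/* 512 == DEV_BSIZE */
}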
2633
2634/*
2635 * Strategy routine.
2636 * For async requests when nfsiod(s) are running, queue the request by
2637 * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
2638 * request.
2639 */
2640static int
2641nfs_strategy(ap)
2642 struct vop_strategy_args *ap;
2643{
2644 register struct buf *bp = ap->a_bp;
2645 struct ucred *cr;
2646 struct proc *p;
2647 int error = 0;
2648
2649 KASSERT(!(bp->b_flags & B_DONE), ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2650 KASSERT((bp->b_flags & B_BUSY), ("nfs_strategy: buffer %p not B_BUSY", bp));
2651
2652 if (bp->b_flags & B_PHYS)
2653 panic("nfs physio");
2654
2655 if (bp->b_flags & B_ASYNC)
2656 p = (struct proc *)0;
2657 else
2658 p = curproc; /* XXX */
2659
2660 if (bp->b_flags & B_READ)
2661 cr = bp->b_rcred;
2662 else
2663 cr = bp->b_wcred;
2664
2665 /*
2666 * If the op is asynchronous and an i/o daemon is waiting,
2667 * queue the request, wake it up and wait for completion;
2668 * otherwise just do it ourselves.
2669 */
2670 if ((bp->b_flags & B_ASYNC) == 0 ||
2671 nfs_asyncio(bp, NOCRED))
2672 error = nfs_doio(bp, cr, p);
2673 return (error);
2674}
2675
2676/*
2677 * Mmap a file
2678 *
2679 * NB Currently unsupported.
2680 */
2681/* ARGSUSED */
2682static int
2683nfs_mmap(ap)
2684 struct vop_mmap_args /* {
2685 struct vnode *a_vp;
2686 int a_fflags;
2687 struct ucred *a_cred;
2688 struct proc *a_p;
2689 } */ *ap;
2690{
2691
2692 return (EINVAL);
2693}
2694
2695/*
2696 * fsync vnode op. Just call nfs_flush() with commit == 1.
2697 */
2698/* ARGSUSED */
2699static int
2700nfs_fsync(ap)
2701 struct vop_fsync_args /* {
2702 struct vnodeop_desc *a_desc;
2703 struct vnode * a_vp;
2704 struct ucred * a_cred;
2705 int a_waitfor;
2706 struct proc * a_p;
2707 } */ *ap;
2708{
2709
2710 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2711}
2712
2713/*
2714 * Flush all the blocks associated with a vnode.
2715 * Walk through the buffer pool and push any dirty pages
2716 * associated with the vnode.
2717 */
2718static int
2719nfs_flush(vp, cred, waitfor, p, commit)
2720 register struct vnode *vp;
2721 struct ucred *cred;
2722 int waitfor;
2723 struct proc *p;
2724 int commit;
2725{
2726 register struct nfsnode *np = VTONFS(vp);
2727 register struct buf *bp;
2728 register int i;
2729 struct buf *nbp;
2730 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2731 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2732 int passone = 1;
2733 u_quad_t off, endoff, toff;
2734 struct ucred* wcred = NULL;
2735 struct buf **bvec = NULL;
2736#ifndef NFS_COMMITBVECSIZ
2737#define NFS_COMMITBVECSIZ 20
2738#endif
2739 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2740 int bvecsize = 0, bveccount;
2741
2742 if (nmp->nm_flag & NFSMNT_INT)
2743 slpflag = PCATCH;
2744 if (!commit)
2745 passone = 0;
2746 /*
2747 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2748 * server, but has not been committed to stable storage on the server
2749 * yet. On the first pass, the byte range is worked out and the commit
2750 * rpc is done. On the second pass, nfs_writebp() is called to do the
2751 * job.
2752 */
2753again:
2754 off = (u_quad_t)-1;
2755 endoff = 0;
2756 bvecpos = 0;
2757 if (NFS_ISV3(vp) && commit) {
2758 s = splbio();
2759 /*
2760 * Count up how many buffers are waiting for a commit.
2761 */
2762 bveccount = 0;
2763 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2764 nbp = TAILQ_NEXT(bp, b_vnbufs);
2765 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2766 == (B_DELWRI | B_NEEDCOMMIT))
2767 bveccount++;
2768 }
2769 /*
2770 * Allocate space to remember the list of bufs to commit. It is
2771 * important to use M_NOWAIT here to avoid a race with nfs_write.
2772 * If we can't get memory (for whatever reason), we will end up
2773 * committing the buffers one-by-one in the loop below.
2774 */
2775 if (bveccount > NFS_COMMITBVECSIZ) {
2776 if (bvec != NULL && bvec != bvec_on_stack)
2777 free(bvec, M_TEMP);
2778 bvec = (struct buf **)
2779 malloc(bveccount * sizeof(struct buf *),
2780 M_TEMP, M_NOWAIT);
2781 if (bvec == NULL) {
2782 bvec = bvec_on_stack;
2783 bvecsize = NFS_COMMITBVECSIZ;
2784 } else
2785 bvecsize = bveccount;
2786 } else {
2787 bvec = bvec_on_stack;
2788 bvecsize = NFS_COMMITBVECSIZ;
2789 }
2790 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2791 nbp = TAILQ_NEXT(bp, b_vnbufs);
2792 if (bvecpos >= bvecsize)
2793 break;
2794 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2795 != (B_DELWRI | B_NEEDCOMMIT))
2796 continue;
2797 bremfree(bp);
2798 /*
2799 * Work out if all buffers are using the same cred
2800 * so we can deal with them all with one commit.
2801 *
2802 * NOTE: we are not clearing B_DONE here, so we have
2803 * to do it later on in this routine if we intend to
2804 * initiate I/O on the bp.
2805 */
2806 if (wcred == NULL)
2807 wcred = bp->b_wcred;
2808 else if (wcred != bp->b_wcred)
2809 wcred = NOCRED;
2810 bp->b_flags |= (B_BUSY | B_WRITEINPROG);
2811 vfs_busy_pages(bp, 1);
2812
2813 /*
2814 * bp is protected by being B_BUSY, but nbp is not
2815 * and vfs_busy_pages() may sleep. We have to
2816 * recalculate nbp.
2817 */
2818 nbp = TAILQ_NEXT(bp, b_vnbufs);
2819
2820 /*
2821 * A list of these buffers is kept so that the
2822 * second loop knows which buffers have actually
2823 * been committed. This is necessary, since there
2824 * may be a race between the commit rpc and new
2825 * uncommitted writes on the file.
2826 */
2827 bvec[bvecpos++] = bp;
2828 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2829 bp->b_dirtyoff;
2830 if (toff < off)
2831 off = toff;
2832 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2833 if (toff > endoff)
2834 endoff = toff;
2835 }
2836 splx(s);
2837 }
2838 if (bvecpos > 0) {
2839 /*
2840 * Commit data on the server, as required.
2841 * If all bufs are using the same wcred, then use that with
2842 * one call for all of them, otherwise commit each one
2843 * separately.
2844 */
2845 if (wcred != NOCRED)
2846 retv = nfs_commit(vp, off, (int)(endoff - off),
2847 wcred, p);
2848 else {
2849 retv = 0;
2850 for (i = 0; i < bvecpos; i++) {
2851 off_t off, size;
2852 bp = bvec[i];
2853 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2854 bp->b_dirtyoff;
2855 size = (u_quad_t)(bp->b_dirtyend
2856 - bp->b_dirtyoff);
2857 retv = nfs_commit(vp, off, (int)size,
2858 bp->b_wcred, p);
2859 if (retv) break;
2860 }
2861 }
2862
2863 if (retv == NFSERR_STALEWRITEVERF)
2864 nfs_clearcommit(vp->v_mount);
2865
2866 /*
2867 * Now, either mark the blocks I/O done or mark the
2868 * blocks dirty, depending on whether the commit
2869 * succeeded.
2870 */
2871 for (i = 0; i < bvecpos; i++) {
2872 bp = bvec[i];
2873 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG);
2874 if (retv) {
2875 /*
2876 * Error, leave B_DELWRI intact
2877 */
2878 vfs_unbusy_pages(bp);
2879 brelse(bp);
2880 } else {
2881 /*
2882 * Success, remove B_DELWRI ( bundirty() ).
2883 *
2884 * b_dirtyoff/b_dirtyend seem to be NFS
2885 * specific. We should probably move that
2886 * into bundirty(). XXX
2887 */
2888 s = splbio();
2889 vp->v_numoutput++;
2890 bp->b_flags |= B_ASYNC;
2891 bundirty(bp);
2892 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2893 bp->b_dirtyoff = bp->b_dirtyend = 0;
2894 splx(s);
2895 biodone(bp);
2896 }
2897 }
2898 }
2899
2900 /*
2901 * Start/do any write(s) that are required.
2902 */
2903loop:
2904 s = splbio();
2905 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2906 nbp = TAILQ_NEXT(bp, b_vnbufs);
2907 if (bp->b_flags & B_BUSY) {
2908 if (waitfor != MNT_WAIT || passone)
2909 continue;
2910 bp->b_flags |= B_WANTED;
2911 error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
2912 "nfsfsync", slptimeo);
2913 splx(s);
2914 if (error) {
2915 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
2916 error = EINTR;
2917 goto done;
2918 }
2919 if (slpflag == PCATCH) {
2920 slpflag = 0;
2921 slptimeo = 2 * hz;
2922 }
2923 }
2924 goto loop;
2925 }
2926 if ((bp->b_flags & B_DELWRI) == 0)
2927 panic("nfs_fsync: not dirty");
2928 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT))
2929 continue;
2930 bremfree(bp);
2931 if (passone || !commit)
2932 bp->b_flags |= (B_BUSY|B_ASYNC);
2933 else
2934 bp->b_flags |= (B_BUSY|B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT);
2935 splx(s);
2936 VOP_BWRITE(bp);
2937 goto loop;
2938 }
2939 splx(s);
2940 if (passone) {
2941 passone = 0;
2942 goto again;
2943 }
2944 if (waitfor == MNT_WAIT) {
2945 while (vp->v_numoutput) {
2946 vp->v_flag |= VBWAIT;
2947 error = tsleep((caddr_t)&vp->v_numoutput,
2948 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
2949 if (error) {
2950 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
2951 error = EINTR;
2952 goto done;
2953 }
2954 if (slpflag == PCATCH) {
2955 slpflag = 0;
2956 slptimeo = 2 * hz;
2957 }
2958 }
2959 }
2960 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
2961 goto loop;
2962 }
2963 }
2964 if (np->n_flag & NWRITEERR) {
2965 error = np->n_error;
2966 np->n_flag &= ~NWRITEERR;
2967 }
2968done:
2969 if (bvec != NULL && bvec != bvec_on_stack)
2970 free(bvec, M_TEMP);
2971 return (error);
2972}
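/*
 * Illustrative, self-contained sketch (not part of nfs_vnops.c) of how the
 * first pass of nfs_flush() above folds the dirty regions of many buffers
 * into a single byte range, so that one commit rpc can cover them all when
 * every buffer shares the same credential.  The struct extent and
 * covering_range names are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

struct extent {
	uint64_t start;		/* first dirty byte (file offset) */
	uint64_t end;		/* one past the last dirty byte */
};

static struct extent
covering_range(const struct extent *v, size_t n)
{
	struct extent r = { UINT64_MAX, 0 };	/* like off = -1, endoff = 0 */
	size_t i;

	for (i = 0; i < n; i++) {
		if (v[i].start < r.start)
			r.start = v[i].start;
		if (v[i].end > r.end)
			r.end = v[i].end;
	}
	return (r);	/* commit r.end - r.start bytes starting at r.start */
}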
2973
2974/*
2975 * NFS advisory byte-level locks.
2976 * Currently unsupported.
2977 */
2978static int
2979nfs_advlock(ap)
2980 struct vop_advlock_args /* {
2981 struct vnode *a_vp;
2982 caddr_t a_id;
2983 int a_op;
2984 struct flock *a_fl;
2985 int a_flags;
2986 } */ *ap;
2987{
2988 register struct nfsnode *np = VTONFS(ap->a_vp);
2989
2990 /*
2991 * The following kludge is to allow diskless support to work
2992 * until a real NFS lockd is implemented. Basically, just pretend
2993 * that this is a local lock.
2994 */
2995 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
2996}
2997
2998/*
2999 * Print out the contents of an nfsnode.
3000 */
3001static int
3002nfs_print(ap)
3003 struct vop_print_args /* {
3004 struct vnode *a_vp;
3005 } */ *ap;
3006{
3007 register struct vnode *vp = ap->a_vp;
3008 register struct nfsnode *np = VTONFS(vp);
3009
3010 printf("tag VT_NFS, fileid %ld fsid 0x%lx",
3011 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3012 if (vp->v_type == VFIFO)
3013 fifo_printinfo(vp);
3014 printf("\n");
3015 return (0);
3016}
3017
3018/*
3019 * Just call nfs_writebp() with the force argument set to 1.
3020 *
3021 * NOTE: B_DONE may or may not be set in a_bp on call.
3022 */
3023static int
3024nfs_bwrite(ap)
3025 struct vop_bwrite_args /* {
3026 struct vnode *a_bp;
3027 } */ *ap;
3028{
3029 return (nfs_writebp(ap->a_bp, 1));
3030}
3031
3032/*
3033 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
3034 * the force flag is one and it also handles the B_NEEDCOMMIT flag. We set
3035 * B_CACHE if this is a VMIO buffer.
3036 */
3037int
3038nfs_writebp(bp, force)
3039 register struct buf *bp;
3040 int force;
3041{
3042 int s;
3043 int oldflags = bp->b_flags;
3044 int retv = 1;
3045 off_t off;
3046
3047 if(!(bp->b_flags & B_BUSY))
3048 panic("bwrite: buffer is not busy???");
3049
3050 if (bp->b_flags & B_INVAL) {
3051 brelse(bp);
3052 return(0);
3053 }
3054
3055 bp->b_flags |= B_CACHE;
3056
3057 /*
3058 * Undirty the bp. We will redirty it later if the I/O fails.
3059 */
3060
3061 s = splbio();
3062 bundirty(bp);
3063 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
3064
3065 bp->b_vp->v_numoutput++;
3066 curproc->p_stats->p_ru.ru_oublock++;
3067 splx(s);
3068
3069 /*
3070 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not,
3071 * an actual write will have to be scheduled via VOP_STRATEGY().
3072 * If B_WRITEINPROG is already set, then push it with a write anyhow.
3073 */
3074 vfs_busy_pages(bp, 1);
3075 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) {
3076 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
3077 bp->b_flags |= B_WRITEINPROG;
3078 retv = nfs_commit(bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
3079 bp->b_wcred, bp->b_proc);
3080 bp->b_flags &= ~B_WRITEINPROG;
3081 if (!retv) {
3082 bp->b_dirtyoff = bp->b_dirtyend = 0;
3083 bp->b_flags &= ~B_NEEDCOMMIT;
3084 biodone(bp);
3085 } else if (retv == NFSERR_STALEWRITEVERF) {
3086 nfs_clearcommit(bp->b_vp->v_mount);
3087 }
3088 }
3089 if (retv) {
3090 if (force)
3091 bp->b_flags |= B_WRITEINPROG;
3092 VOP_STRATEGY(bp->b_vp, bp);
3093 }
3094
3095 if( (oldflags & B_ASYNC) == 0) {
3096 int rtval = biowait(bp);
3097
3098 if (oldflags & B_DELWRI) {
3099 s = splbio();
3100 reassignbuf(bp, bp->b_vp);
3101 splx(s);
3102 }
3103
3104 brelse(bp);
3105 return (rtval);
3106 }
3107
3108 return (0);
3109}
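/*
 * Illustrative sketch (not part of nfs_vnops.c) of the offset arithmetic
 * used by nfs_writebp() and nfs_flush() above when issuing a commit: a
 * buffer's b_blkno is expressed in DEV_BSIZE (512 byte) units, and
 * b_dirtyoff/b_dirtyend delimit the dirty bytes within the buffer, so the
 * dirty region covers [blkno * 512 + dirtyoff, blkno * 512 + dirtyend).
 * struct dirty_span and dirty_region are hypothetical names.
 */
#include <stdint.h>

struct dirty_span {
	uint64_t offset;	/* file offset of the first dirty byte */
	int	 count;		/* number of dirty bytes to commit */
};

static struct dirty_span
dirty_region(uint64_t blkno, int dirtyoff, int dirtyend)
{
	struct dirty_span s;

	s.offset = blkno * 512 + (uint64_t)dirtyoff;	/* 512 == DEV_BSIZE */
	s.count = dirtyend - dirtyoff;
	return (s);
}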
3110
3111/*
3112 * nfs special file access vnode op.
3113 * Essentially just get vattr and then imitate iaccess() since the device is
3114 * local to the client.
3115 */
3116static int
3117nfsspec_access(ap)
3118 struct vop_access_args /* {
3119 struct vnode *a_vp;
3120 int a_mode;
3121 struct ucred *a_cred;
3122 struct proc *a_p;
3123 } */ *ap;
3124{
3125 register struct vattr *vap;
3126 register gid_t *gp;
3127 register struct ucred *cred = ap->a_cred;
3128 struct vnode *vp = ap->a_vp;
3129 mode_t mode = ap->a_mode;
3130 struct vattr vattr;
3131 register int i;
3132 int error;
3133
3134 /*
3135 * Disallow write attempts on filesystems mounted read-only,
3136 * unless the file is a socket, fifo, or a block or character
3137 * device resident on the filesystem.
3138 */
3139 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3140 switch (vp->v_type) {
3141 case VREG:
3142 case VDIR:
3143 case VLNK:
3144 return (EROFS);
3145 default:
3146 break;
3147 }
3148 }
3149 /*
3150 * If you're the super-user,
3151 * you always get access.
3152 */
3153 if (cred->cr_uid == 0)
3154 return (0);
3155 vap = &vattr;
3156 error = VOP_GETATTR(vp, vap, cred, ap->a_p);
3157 if (error)
3158 return (error);
3159 /*
3160 * Access check is based on only one of owner, group, public.
3161 * If not owner, then check group. If not a member of the
3162 * group, then check public access.
3163 */
3164 if (cred->cr_uid != vap->va_uid) {
3165 mode >>= 3;
3166 gp = cred->cr_groups;
3167 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3168 if (vap->va_gid == *gp)
3169 goto found;
3170 mode >>= 3;
3171found:
3172 ;
3173 }
3174 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3175 return (error);
3176}
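/*
 * Illustrative user-space sketch (not part of nfs_vnops.c) of the classic
 * owner/group/other check performed by nfsspec_access() above.  The
 * requested mode is expressed in the owner bits (e.g. 04 for read); if the
 * caller is not the owner it is shifted down to the group bits, and again
 * to the "other" bits if the caller is not in the file's group.  The name
 * check_access and its parameters are hypothetical.
 */
#include <sys/types.h>
#include <errno.h>

static int
check_access(uid_t cr_uid, const gid_t *cr_groups, int cr_ngroups,
    uid_t file_uid, gid_t file_gid, mode_t file_mode, mode_t req)
{
	int i;

	if (cr_uid == 0)		/* super-user always gets access */
		return (0);
	if (cr_uid != file_uid) {
		req >>= 3;		/* not owner: use group bits */
		for (i = 0; i < cr_ngroups; i++)
			if (cr_groups[i] == file_gid)
				goto found;
		req >>= 3;		/* not in group: use "other" bits */
found:
		;
	}
	return ((file_mode & req) == req ? 0 : EACCES);
}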
3177
3178/*
3179 * Read wrapper for special devices.
3180 */
3181static int
3182nfsspec_read(ap)
3183 struct vop_read_args /* {
3184 struct vnode *a_vp;
3185 struct uio *a_uio;
3186 int a_ioflag;
3187 struct ucred *a_cred;
3188 } */ *ap;
3189{
3190 register struct nfsnode *np = VTONFS(ap->a_vp);
3191
3192 /*
3193 * Set access flag.
3194 */
3195 np->n_flag |= NACC;
3196 getnanotime(&np->n_atim);
3197 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3198}
3199
3200/*
3201 * Write wrapper for special devices.
3202 */
3203static int
3204nfsspec_write(ap)
3205 struct vop_write_args /* {
3206 struct vnode *a_vp;
3207 struct uio *a_uio;
3208 int a_ioflag;
3209 struct ucred *a_cred;
3210 } */ *ap;
3211{
3212 register struct nfsnode *np = VTONFS(ap->a_vp);
3213
3214 /*
3215 * Set update flag.
3216 */
3217 np->n_flag |= NUPD;
3218 getnanotime(&np->n_mtim);
3219 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3220}
3221
3222/*
3223 * Close wrapper for special devices.
3224 *
3225 * Update the times on the nfsnode then do device close.
3226 */
3227static int
3228nfsspec_close(ap)
3229 struct vop_close_args /* {
3230 struct vnode *a_vp;
3231 int a_fflag;
3232 struct ucred *a_cred;
3233 struct proc *a_p;
3234 } */ *ap;
3235{
3236 register struct vnode *vp = ap->a_vp;
3237 register struct nfsnode *np = VTONFS(vp);
3238 struct vattr vattr;
3239
3240 if (np->n_flag & (NACC | NUPD)) {
3241 np->n_flag |= NCHG;
3242 if (vp->v_usecount == 1 &&
3243 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3244 VATTR_NULL(&vattr);
3245 if (np->n_flag & NACC)
3246 vattr.va_atime = np->n_atim;
3247 if (np->n_flag & NUPD)
3248 vattr.va_mtime = np->n_mtim;
3249 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3250 }
3251 }
3252 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3253}
3254
3255/*
3256 * Read wrapper for fifos.
3257 */
3258static int
3259nfsfifo_read(ap)
3260 struct vop_read_args /* {
3261 struct vnode *a_vp;
3262 struct uio *a_uio;
3263 int a_ioflag;
3264 struct ucred *a_cred;
3265 } */ *ap;
3266{
3267 register struct nfsnode *np = VTONFS(ap->a_vp);
3268
3269 /*
3270 * Set access flag.
3271 */
3272 np->n_flag |= NACC;
3273 getnanotime(&np->n_atim);
3274 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3275}
3276
3277/*
3278 * Write wrapper for fifos.
3279 */
3280static int
3281nfsfifo_write(ap)
3282 struct vop_write_args /* {
3283 struct vnode *a_vp;
3284 struct uio *a_uio;
3285 int a_ioflag;
3286 struct ucred *a_cred;
3287 } */ *ap;
3288{
3289 register struct nfsnode *np = VTONFS(ap->a_vp);
3290
3291 /*
3292 * Set update flag.
3293 */
3294 np->n_flag |= NUPD;
3295 getnanotime(&np->n_mtim);
3296 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3297}
3298
3299/*
3300 * Close wrapper for fifos.
3301 *
3302 * Update the times on the nfsnode then do fifo close.
3303 */
3304static int
3305nfsfifo_close(ap)
3306 struct vop_close_args /* {
3307 struct vnode *a_vp;
3308 int a_fflag;
3309 struct ucred *a_cred;
3310 struct proc *a_p;
3311 } */ *ap;
3312{
3313 register struct vnode *vp = ap->a_vp;
3314 register struct nfsnode *np = VTONFS(vp);
3315 struct vattr vattr;
3316 struct timespec ts;
3317
3318 if (np->n_flag & (NACC | NUPD)) {
3319 getnanotime(&ts);
3320 if (np->n_flag & NACC)
3321 np->n_atim = ts;
3322 if (np->n_flag & NUPD)
3323 np->n_mtim = ts;
3324 np->n_flag |= NCHG;
3325 if (vp->v_usecount == 1 &&
3326 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3327 VATTR_NULL(&vattr);
3328 if (np->n_flag & NACC)
3329 vattr.va_atime = np->n_atim;
3330 if (np->n_flag & NUPD)
3331 vattr.va_mtime = np->n_mtim;
3332 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3333 }
3334 }
3335 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3336}
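/*
 * Illustrative, self-contained sketch (not part of nfs_vnops.c) of the
 * deferred time-update scheme used by the special-file and fifo wrappers
 * above.  Reads and writes only mark the node (NACC/NUPD) and record a
 * local timestamp; the accumulated times are pushed to the server with a
 * single setattr when the last reference is closed on a writable mount.
 * All names below (node_times, mark_read, mark_write, flush_times,
 * send_setattr) are hypothetical.
 */
#include <time.h>

#define T_ACC	0x01		/* stands in for NACC */
#define T_UPD	0x02		/* stands in for NUPD */

struct node_times {
	int		flags;
	struct timespec	atim;
	struct timespec	mtim;
};

static void
mark_read(struct node_times *nt)
{
	nt->flags |= T_ACC;
	clock_gettime(CLOCK_REALTIME, &nt->atim);
}

static void
mark_write(struct node_times *nt)
{
	nt->flags |= T_UPD;
	clock_gettime(CLOCK_REALTIME, &nt->mtim);
}

/* Called on close; pushes the times only on the last use of a r/w mount. */
static void
flush_times(struct node_times *nt, int last_use, int read_only,
    void (*send_setattr)(const struct node_times *))
{
	if ((nt->flags & (T_ACC | T_UPD)) && last_use && !read_only)
		send_setattr(nt);
}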