nfs_subs.c revision 190396
1/*-
2 * Copyright (c) 1989, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	@(#)nfs_subs.c  8.8 (Berkeley) 5/22/95
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/nfsclient/nfs_subs.c 190396 2009-03-24 23:16:48Z rwatson $");
37
38/*
39 * These functions support the macros and help fiddle mbuf chains for
40 * the nfs op functions. They do things like create the rpc header and
41 * copy data between mbuf chains and uio lists.
42 */
43
44#include "opt_kdtrace.h"
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/kernel.h>
49#include <sys/bio.h>
50#include <sys/buf.h>
51#include <sys/proc.h>
52#include <sys/mount.h>
53#include <sys/vnode.h>
54#include <sys/namei.h>
55#include <sys/mbuf.h>
56#include <sys/socket.h>
57#include <sys/stat.h>
58#include <sys/malloc.h>
59#include <sys/sysent.h>
60#include <sys/syscall.h>
61#include <sys/sysproto.h>
62
63#include <vm/vm.h>
64#include <vm/vm_object.h>
65#include <vm/vm_extern.h>
66#include <vm/uma.h>
67
68#include <rpc/rpcclnt.h>
69
70#include <nfs/rpcv2.h>
71#include <nfs/nfsproto.h>
72#include <nfsclient/nfs.h>
73#include <nfsclient/nfsnode.h>
74#include <nfsclient/nfs_kdtrace.h>
75#include <nfs/xdr_subs.h>
76#include <nfsclient/nfsm_subs.h>
77#include <nfsclient/nfsmount.h>
78
79#include <netinet/in.h>
80
81/*
82 * Note that stdarg.h and the ANSI style va_start macro is used for both
83 * ANSI and traditional C compilers.
84 */
85#include <machine/stdarg.h>
86
#ifdef KDTRACE_HOOKS
/*
 * DTrace probe hooks for the NFS client attribute cache.  The function
 * pointers are filled in by the dtnfsclient provider module when it loads;
 * the *_id variables hold the registered probe identifiers.
 */
dtrace_nfsclient_attrcache_flush_probe_func_t
    dtrace_nfsclient_attrcache_flush_done_probe;
uint32_t nfsclient_attrcache_flush_done_id;

dtrace_nfsclient_attrcache_get_hit_probe_func_t
    dtrace_nfsclient_attrcache_get_hit_probe;
uint32_t nfsclient_attrcache_get_hit_id;

dtrace_nfsclient_attrcache_get_miss_probe_func_t
    dtrace_nfsclient_attrcache_get_miss_probe;
uint32_t nfsclient_attrcache_get_miss_id;

dtrace_nfsclient_attrcache_load_probe_func_t
    dtrace_nfsclient_attrcache_load_done_probe;
uint32_t nfsclient_attrcache_load_done_id;
#endif /* KDTRACE_HOOKS */
104
105/*
106 * Data items converted to xdr at startup, since they are constant
107 * This is kinda hokey, but may save a little time doing byte swaps
108 */
u_int32_t	nfs_xdrneg1;
u_int32_t	rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr,
		    rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
u_int32_t	nfs_true, nfs_false;

/* And other global data */
/* RPC transaction id counter; protected by nfs_xid_mtx (see nfs_xid_gen). */
static u_int32_t nfs_xid = 0;
/* Map an NFSv2 on-the-wire file type (0..7) to the local vnode type. */
static enum vtype nv2tov_type[8]= {
	VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON,  VNON
};

int		nfs_ticks;
int		nfs_pbuf_freecnt = -1;	/* start out unlimited */

#ifdef NFS_LEGACYRPC
struct nfs_reqq	nfs_reqq;
struct mtx nfs_reqq_mtx;
#endif
struct nfs_bufq	nfs_bufq;
static struct mtx nfs_xid_mtx;
129
130/*
131 * and the reverse mapping from generic to Version 2 procedure numbers
132 */
/*
 * Indexed by the generic (v3-numbered) procedure; entries for procedures
 * that have no NFSv2 equivalent map to NFSV2PROC_NOOP, and MKNOD is
 * approximated by the v2 CREATE.
 */
int nfsv2_procid[NFS_NPROCS] = {
	NFSV2PROC_NULL,
	NFSV2PROC_GETATTR,
	NFSV2PROC_SETATTR,
	NFSV2PROC_LOOKUP,
	NFSV2PROC_NOOP,
	NFSV2PROC_READLINK,
	NFSV2PROC_READ,
	NFSV2PROC_WRITE,
	NFSV2PROC_CREATE,
	NFSV2PROC_MKDIR,
	NFSV2PROC_SYMLINK,
	NFSV2PROC_CREATE,
	NFSV2PROC_REMOVE,
	NFSV2PROC_RMDIR,
	NFSV2PROC_RENAME,
	NFSV2PROC_LINK,
	NFSV2PROC_READDIR,
	NFSV2PROC_NOOP,
	NFSV2PROC_STATFS,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
};
158
159LIST_HEAD(nfsnodehashhead, nfsnode);
160
161u_int32_t
162nfs_xid_gen(void)
163{
164	uint32_t xid;
165
166	mtx_lock(&nfs_xid_mtx);
167
168	/* Get a pretty random xid to start with */
169	if (!nfs_xid)
170		nfs_xid = random();
171	/*
172	 * Skip zero xid if it should ever happen.
173	 */
174	if (++nfs_xid == 0)
175		nfs_xid++;
176	xid = nfs_xid;
177	mtx_unlock(&nfs_xid_mtx);
178	return xid;
179}
180
181/*
182 * Create the header for an rpc request packet
183 * The hsiz is the size of the rest of the nfs request header.
184 * (just used to decide if a cluster is a good idea)
185 */
186struct mbuf *
187nfsm_reqhead(struct vnode *vp, u_long procid, int hsiz)
188{
189	struct mbuf *mb;
190
191	MGET(mb, M_WAIT, MT_DATA);
192	if (hsiz >= MINCLSIZE)
193		MCLGET(mb, M_WAIT);
194	mb->m_len = 0;
195	return (mb);
196}
197
198/*
199 * Build the RPC header and fill in the authorization info.
200 * The authorization string argument is only used when the credentials
201 * come from outside of the kernel.
202 * Returns the head of the mbuf list.
203 */
struct mbuf *
nfsm_rpchead(struct ucred *cr, int nmflag, int procid, int auth_type,
    int auth_len, struct mbuf *mrest, int mrest_len, struct mbuf **mbp,
    u_int32_t **xidpp)
{
	struct mbuf *mb;
	u_int32_t *tl;
	caddr_t bpos;
	int i;
	struct mbuf *mreq;
	int grpsiz, authsiz;

	/*
	 * The fixed part of the header is 10 words: 8 for the RPC call
	 * header plus 2 for the (null) verifier; the credential body adds
	 * authsiz (auth_len rounded up to a word multiple).
	 */
	authsiz = nfsm_rndup(auth_len);
	MGETHDR(mb, M_WAIT, MT_DATA);
	if ((authsiz + 10 * NFSX_UNSIGNED) >= MINCLSIZE) {
		MCLGET(mb, M_WAIT);
	} else if ((authsiz + 10 * NFSX_UNSIGNED) < MHLEN) {
		MH_ALIGN(mb, authsiz + 10 * NFSX_UNSIGNED);
	} else {
		MH_ALIGN(mb, 8 * NFSX_UNSIGNED);
	}
	mb->m_len = 0;
	mreq = mb;
	bpos = mtod(mb, caddr_t);

	/*
	 * First the RPC header.
	 * NOTE: nfsm_build() implicitly uses the locals mb and bpos.
	 */
	tl = nfsm_build(u_int32_t *, 8 * NFSX_UNSIGNED);

	/* Remember where the xid lives so the caller can match replies. */
	*xidpp = tl;
	*tl++ = txdr_unsigned(nfs_xid_gen());
	*tl++ = rpc_call;
	*tl++ = rpc_vers;
	*tl++ = txdr_unsigned(NFS_PROG);
	if (nmflag & NFSMNT_NFSV3) {
		*tl++ = txdr_unsigned(NFS_VER3);
		*tl++ = txdr_unsigned(procid);
	} else {
		/* Translate the generic procedure number to its v2 value. */
		*tl++ = txdr_unsigned(NFS_VER2);
		*tl++ = txdr_unsigned(nfsv2_procid[procid]);
	}

	/*
	 * And then the authorization cred.
	 */
	*tl++ = txdr_unsigned(auth_type);
	*tl = txdr_unsigned(authsiz);
	switch (auth_type) {
	case RPCAUTH_UNIX:
		tl = nfsm_build(u_int32_t *, auth_len);
		*tl++ = 0;		/* stamp ?? */
		*tl++ = 0;		/* NULL hostname */
		*tl++ = txdr_unsigned(cr->cr_uid);
		*tl++ = txdr_unsigned(cr->cr_groups[0]);
		/* auth_len covers stamp, hostname, uid, gid and gid count. */
		grpsiz = (auth_len >> 2) - 5;
		*tl++ = txdr_unsigned(grpsiz);
		for (i = 1; i <= grpsiz; i++)
			*tl++ = txdr_unsigned(cr->cr_groups[i]);
		break;
	}

	/*
	 * And the verifier...
	 */
	tl = nfsm_build(u_int32_t *, 2 * NFSX_UNSIGNED);
	*tl++ = txdr_unsigned(RPCAUTH_NULL);
	*tl = 0;
	/* Append the caller-supplied request body and fix up pkthdr.len. */
	mb->m_next = mrest;
	mreq->m_pkthdr.len = authsiz + 10 * NFSX_UNSIGNED + mrest_len;
	mreq->m_pkthdr.rcvif = NULL;
	*mbp = mb;
	return (mreq);
}
278
279/*
280 * copies a uio scatter/gather list to an mbuf chain.
281 * NOTE: can ony handle iovcnt == 1
282 */
int
nfsm_uiotombuf(struct uio *uiop, struct mbuf **mq, int siz, caddr_t *bpos)
{
	char *uiocp;
	struct mbuf *mp, *mp2;
	int xfer, left, mlen;
	int uiosiz, clflg, rem;
	char *cp;

#ifdef DIAGNOSTIC
	if (uiop->uio_iovcnt != 1)
		panic("nfsm_uiotombuf: iovcnt != 1");
#endif

	/* Use clusters for every new mbuf when the copy is large. */
	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	/* rem = bytes of zero padding needed to reach XDR word alignment. */
	rem = nfsm_rndup(siz)-siz;
	mp = mp2 = *mq;
	while (siz > 0) {
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			mlen = M_TRAILINGSPACE(mp);
			if (mlen == 0) {
				/* Tail mbuf is full; grow the chain. */
				MGET(mp, M_WAIT, MT_DATA);
				if (clflg)
					MCLGET(mp, M_WAIT);
				mp->m_len = 0;
				mp2->m_next = mp;
				mp2 = mp;
				mlen = M_TRAILINGSPACE(mp);
			}
			xfer = (left > mlen) ? mlen : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
#endif
			/* Kernel source copies directly; user source via copyin. */
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer);
			mp->m_len += xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		/* Consume the copied bytes from the (single) iovec. */
		uiop->uio_iov->iov_base =
		    (char *)uiop->uio_iov->iov_base + uiosiz;
		uiop->uio_iov->iov_len -= uiosiz;
		siz -= uiosiz;
	}
	if (rem > 0) {
		/* Emit the XDR pad bytes, growing the chain if needed. */
		if (rem > M_TRAILINGSPACE(mp)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t)+mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t)+mp->m_len;
	/* Hand back the new tail mbuf and build position. */
	*mq = mp;
	return (0);
}
359
360/*
361 * Copy a string into mbufs for the hard cases...
362 */
int
nfsm_strtmbuf(struct mbuf **mb, char **bpos, const char *cp, long siz)
{
	struct mbuf *m1 = NULL, *m2;
	long left, xfer, len, tlen;
	u_int32_t *tl;
	int putsize;

	/* putsize: the XDR length word still needs to be emitted. */
	putsize = 1;
	m2 = *mb;
	left = M_TRAILINGSPACE(m2);
	if (left > 0) {
		/* Emit the length word into the current mbuf ... */
		tl = ((u_int32_t *)(*bpos));
		*tl++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			/* ... and as many string bytes as fit after it. */
			bcopy(cp, (caddr_t) tl, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		tl = mtod(m1, u_int32_t *);
		tlen = 0;
		if (putsize) {
			/* Length word goes at the front of the first new mbuf. */
			*tl++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			/* Final chunk: round up and zero the pad word. */
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(tl+(xfer>>2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) tl, xfer);
		m1->m_len = len+tlen;
		siz -= xfer;
		cp += xfer;
	}
	/* Hand the (new) tail mbuf and build position back to the caller. */
	*mb = m1;
	*bpos = mtod(m1, caddr_t)+m1->m_len;
	return (0);
}
421
422/*
423 * Called once to initialize data structures...
424 */
int
nfs_init(struct vfsconf *vfsp)
{
	int i;

	nfsmount_zone = uma_zcreate("NFSMOUNT", sizeof(struct nfsmount),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Pre-encode the constant RPC header values so the byte swaps
	 * are done once here rather than on every request.
	 */
	rpc_vers = txdr_unsigned(RPC_VER2);
	rpc_call = txdr_unsigned(RPC_CALL);
	rpc_reply = txdr_unsigned(RPC_REPLY);
	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
	rpc_autherr = txdr_unsigned(RPC_AUTHERR);
	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
	nfs_true = txdr_unsigned(TRUE);
	nfs_false = txdr_unsigned(FALSE);
	nfs_xdrneg1 = txdr_unsigned(-1);
	/* Convert NFS_TICKINTVL (milliseconds) to clock ticks, rounding. */
	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
	if (nfs_ticks < 1)
		nfs_ticks = 1;
	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		nfs_iodwant[i] = NULL;
		nfs_iodmount[i] = NULL;
	}
	nfs_nhinit();			/* Init the nfsnode table */

	/*
	 * Initialize reply list and start timer
	 */
#ifdef NFS_LEGACYRPC
	TAILQ_INIT(&nfs_reqq);
	mtx_init(&nfs_reqq_mtx, "NFS reqq lock", NULL, MTX_DEF);
	callout_init(&nfs_callout, CALLOUT_MPSAFE);
#endif
	mtx_init(&nfs_iod_mtx, "NFS iod lock", NULL, MTX_DEF);
	mtx_init(&nfs_xid_mtx, "NFS xid lock", NULL, MTX_DEF);

	nfs_pbuf_freecnt = nswbuf / 2 + 1;

	return (0);
}
468
/*
 * Tear down the state created by nfs_init() at module unload time.
 * Must not run while any NFS mount is still active.
 */
int
nfs_uninit(struct vfsconf *vfsp)
{
	int i;

#ifdef NFS_LEGACYRPC
	callout_stop(&nfs_callout);

	KASSERT(TAILQ_EMPTY(&nfs_reqq),
	    ("nfs_uninit: request queue not empty"));
#endif

	/*
	 * Tell all nfsiod processes to exit. Clear nfs_iodmax, and wakeup
	 * any sleeping nfsiods so they check nfs_iodmax and exit.
	 */
	mtx_lock(&nfs_iod_mtx);
	nfs_iodmax = 0;
	for (i = 0; i < nfs_numasync; i++)
		if (nfs_iodwant[i])
			wakeup(&nfs_iodwant[i]);
	/* The last nfsiod to exit will wake us up when nfs_numasync hits 0 */
	while (nfs_numasync)
		msleep(&nfs_numasync, &nfs_iod_mtx, PWAIT, "ioddie", 0);
	mtx_unlock(&nfs_iod_mtx);
	nfs_nhuninit();
	uma_zdestroy(nfsmount_zone);
	return (0);
}
498
/*
 * Acquire the per-nfsnode directory cookie lock (NDIRCOOKIELK),
 * sleeping on n_flag until the current holder releases it.
 */
void
nfs_dircookie_lock(struct nfsnode *np)
{
	mtx_lock(&np->n_mtx);
	while (np->n_flag & NDIRCOOKIELK)
		(void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0);
	np->n_flag |= NDIRCOOKIELK;
	mtx_unlock(&np->n_mtx);
}
508
/*
 * Release the directory cookie lock and wake any thread sleeping in
 * nfs_dircookie_lock().
 */
void
nfs_dircookie_unlock(struct nfsnode *np)
{
	mtx_lock(&np->n_mtx);
	np->n_flag &= ~NDIRCOOKIELK;
	wakeup(&np->n_flag);
	mtx_unlock(&np->n_mtx);
}
517
518int
519nfs_upgrade_vnlock(struct vnode *vp)
520{
521	int old_lock;
522
523 	if ((old_lock = VOP_ISLOCKED(vp)) != LK_EXCLUSIVE) {
524 		if (old_lock == LK_SHARED) {
525 			/* Upgrade to exclusive lock, this might block */
526 			vn_lock(vp, LK_UPGRADE | LK_RETRY);
527 		} else {
528 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
529 		}
530  	}
531	return old_lock;
532}
533
534void
535nfs_downgrade_vnlock(struct vnode *vp, int old_lock)
536{
537	if (old_lock != LK_EXCLUSIVE) {
538 		if (old_lock == LK_SHARED) {
539 			/* Downgrade from exclusive lock, this might block */
540 			vn_lock(vp, LK_DOWNGRADE);
541 		} else {
542 			VOP_UNLOCK(vp, 0);
543 		}
544  	}
545}
546
547void
548nfs_printf(const char *fmt, ...)
549{
550	va_list ap;
551
552	mtx_lock(&Giant);
553	va_start(ap, fmt);
554	printf(fmt, ap);
555	va_end(ap);
556	mtx_unlock(&Giant);
557}
558
559/*
560 * Attribute cache routines.
561 * nfs_loadattrcache() - loads or updates the cache contents from attributes
562 *	that are on the mbuf list
563 * nfs_getattrcache() - returns valid attributes if found in cache, returns
564 *	error otherwise
565 */
566
567/*
568 * Load the attribute cache (that lives in the nfsnode entry) with
569 * the values on the mbuf list and
570 * Iff vap not NULL
571 *    copy the attributes to *vaper
572 */
int
nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
		  struct vattr *vaper, int dontshrink)
{
	struct vnode *vp = *vpp;
	struct vattr *vap;
	struct nfs_fattr *fp;
	struct nfsnode *np = NULL;
	int32_t t1;
	caddr_t cp2;
	int rdev;
	struct mbuf *md;
	enum vtype vtyp;
	u_short vmode;
	struct timespec mtime, mtime_save;
	int v3 = NFS_ISV3(vp);
	struct thread *td = curthread;
	int error = 0;

	/* Pull a contiguous fattr structure out of the mbuf chain. */
	md = *mdp;
	t1 = (mtod(md, caddr_t) + md->m_len) - *dposp;
	cp2 = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, M_WAIT);
	if (cp2 == NULL) {
		error = EBADRPC;
		goto out;
	}
	fp = (struct nfs_fattr *)cp2;
	if (v3) {
		vtyp = nfsv3tov_type(fp->fa_type);
		vmode = fxdr_unsigned(u_short, fp->fa_mode);
		rdev = makedev(fxdr_unsigned(int, fp->fa3_rdev.specdata1),
			fxdr_unsigned(int, fp->fa3_rdev.specdata2));
		fxdr_nfsv3time(&fp->fa3_mtime, &mtime);
	} else {
		vtyp = nfsv2tov_type(fp->fa_type);
		vmode = fxdr_unsigned(u_short, fp->fa_mode);
		/*
		 * XXX
		 *
		 * The duplicate information returned in fa_type and fa_mode
		 * is an ambiguity in the NFS version 2 protocol.
		 *
		 * VREG should be taken literally as a regular file.  If a
		 * server intents to return some type information differently
		 * in the upper bits of the mode field (e.g. for sockets, or
		 * FIFOs), NFSv2 mandates fa_type to be VNON.  Anyway, we
		 * leave the examination of the mode bits even in the VREG
		 * case to avoid breakage for bogus servers, but we make sure
		 * that there are actually type bits set in the upper part of
		 * fa_mode (and failing that, trust the va_type field).
		 *
		 * NFSv3 cleared the issue, and requires fa_mode to not
		 * contain any type information (while also introduing sockets
		 * and FIFOs for fa_type).
		 */
		if (vtyp == VNON || (vtyp == VREG && (vmode & S_IFMT) != 0))
			vtyp = IFTOVT(vmode);
		rdev = fxdr_unsigned(int32_t, fp->fa2_rdev);
		fxdr_nfsv2time(&fp->fa2_mtime, &mtime);

		/*
		 * Really ugly NFSv2 kludge.
		 */
		if (vtyp == VCHR && rdev == 0xffffffff)
			vtyp = VFIFO;
	}

	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	mtx_lock(&np->n_mtx);
	if (vp->v_type != vtyp) {
		vp->v_type = vtyp;
		if (vp->v_type == VFIFO)
			vp->v_op = &nfs_fifoops;
		np->n_mtime = mtime;
	}
	/* Load the decoded attributes into the cached n_vattr. */
	vap = &np->n_vattr;
	vap->va_type = vtyp;
	vap->va_mode = (vmode & 07777);
	vap->va_rdev = rdev;
	/* Remember the previous mtime to detect reordered replies below. */
	mtime_save = vap->va_mtime;
	vap->va_mtime = mtime;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	if (v3) {
		vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
		vap->va_size = fxdr_hyper(&fp->fa3_size);
		vap->va_blocksize = NFS_FABLKSIZE;
		vap->va_bytes = fxdr_hyper(&fp->fa3_used);
		vap->va_fileid = fxdr_unsigned(int32_t,
		    fp->fa3_fileid.nfsuquad[1]);
		fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime);
		fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime);
		vap->va_flags = 0;
		vap->va_filerev = 0;
	} else {
		vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
		vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
		vap->va_blocksize = fxdr_unsigned(int32_t, fp->fa2_blocksize);
		vap->va_bytes = (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks)
		    * NFS_FABLKSIZE;
		vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid);
		fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime);
		vap->va_flags = 0;
		vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t,
		    fp->fa2_ctime.nfsv2_sec);
		vap->va_ctime.tv_nsec = 0;
		/* v2 has no generation number; reuse the ctime usec field. */
		vap->va_gen = fxdr_unsigned(u_int32_t, fp->fa2_ctime.nfsv2_usec);
		vap->va_filerev = 0;
	}
	np->n_attrstamp = time_second;
	/* Timestamp the NFS otw getattr fetch */
	if (td->td_proc) {
		np->n_ac_ts_tid = td->td_tid;
		np->n_ac_ts_pid = td->td_proc->p_pid;
		np->n_ac_ts_syscalls = td->td_syscalls;
	} else
		bzero(&np->n_ac_ts, sizeof(struct nfs_attrcache_timestamp));

	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (dontshrink && vap->va_size < np->n_size) {
				/*
				 * We've been told not to shrink the file;
				 * zero np->n_attrstamp to indicate that
				 * the attributes are stale.
				 */
				vap->va_size = np->n_size;
				np->n_attrstamp = 0;
				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			} else if (np->n_flag & NMODIFIED) {
				/*
				 * We've modified the file: Use the larger
				 * of our size, and the server's size.
				 */
				if (vap->va_size < np->n_size) {
					vap->va_size = np->n_size;
				} else {
					np->n_size = vap->va_size;
					np->n_flag |= NSIZECHANGED;
				}
			} else {
				np->n_size = vap->va_size;
				np->n_flag |= NSIZECHANGED;
			}
			vnode_pager_setsize(vp, np->n_size);
		} else {
			np->n_size = vap->va_size;
		}
	}
	/*
	 * The following checks are added to prevent a race between (say)
	 * a READDIR+ and a WRITE.
	 * READDIR+, WRITE requests sent out.
	 * READDIR+ resp, WRITE resp received on client.
	 * However, the WRITE resp was handled before the READDIR+ resp
	 * causing the post op attrs from the write to be loaded first
	 * and the attrs from the READDIR+ to be loaded later. If this
	 * happens, we have stale attrs loaded into the attrcache.
	 * We detect this by checking for the mtime moving backwards, and
	 * invalidate the attrcache when it happens.
	 */
	if (timespeccmp(&mtime_save, &vap->va_mtime, >)) {
		/* mtime went backwards */
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	}
	if (vaper != NULL) {
		/* Copy the cached attrs out, overlaid with local changes. */
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC)
				vaper->va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vaper->va_mtime = np->n_mtim;
		}
	}

#ifdef KDTRACE_HOOKS
	if (np->n_attrstamp != 0)
		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, &np->n_vattr, 0);
#endif
	mtx_unlock(&np->n_mtx);
out:
#ifdef KDTRACE_HOOKS
	if (error)
		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, NULL, error);
#endif
	return (error);
}
771
772#ifdef NFS_ACDEBUG
773#include <sys/sysctl.h>
774SYSCTL_DECL(_vfs_nfs);
775static int nfs_acdebug;
776SYSCTL_INT(_vfs_nfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0,
777    "Toggle acdebug (access cache debug) flag");
778#endif
779
780/*
781 * Check the time stamp
782 * If the cache is valid, copy contents to *vap and return 0
783 * otherwise return an error
784 */
int
nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
{
	struct nfsnode *np;
	struct vattr *vap;
	struct nfsmount *nmp;
	int timeo;

	np = VTONFS(vp);
	vap = &np->n_vattr;
	nmp = VFSTONFS(vp->v_mount);
#ifdef NFS_ACDEBUG
	mtx_lock(&Giant);	/* nfs_printf() */
#endif
	mtx_lock(&np->n_mtx);
	/* XXX n_mtime doesn't seem to be updated on a miss-and-reload */
	/*
	 * Scale the cache timeout with the age of the file: the longer it
	 * has been unmodified, the longer the attributes may be trusted.
	 */
	timeo = (time_second - np->n_mtime.tv_sec) / 10;

#ifdef NFS_ACDEBUG
	if (nfs_acdebug>1)
		nfs_printf("nfs_getattrcache: initial timeo = %d\n", timeo);
#endif

	/* Clamp the timeout to the mount's ac{dir,reg}{min,max} bounds. */
	if (vap->va_type == VDIR) {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin)
			timeo = nmp->nm_acdirmin;
		else if (timeo > nmp->nm_acdirmax)
			timeo = nmp->nm_acdirmax;
	} else {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin)
			timeo = nmp->nm_acregmin;
		else if (timeo > nmp->nm_acregmax)
			timeo = nmp->nm_acregmax;
	}

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 2)
		nfs_printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n",
			   nmp->nm_acregmin, nmp->nm_acregmax,
			   nmp->nm_acdirmin, nmp->nm_acdirmax);

	if (nfs_acdebug)
		nfs_printf("nfs_getattrcache: age = %d; final timeo = %d\n",
			   (time_second - np->n_attrstamp), timeo);
#endif

	/* Cache entry too old: force the caller to go over the wire. */
	if ((time_second - np->n_attrstamp) >= timeo) {
		nfsstats.attrcache_misses++;
		mtx_unlock(&np->n_mtx);
		KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);
		return (ENOENT);
	}
	nfsstats.attrcache_hits++;
	/* Reconcile cached size with our notion, as in nfs_loadattrcache(). */
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else {
				np->n_size = vap->va_size;
			}
			vnode_pager_setsize(vp, np->n_size);
		} else {
			np->n_size = vap->va_size;
		}
	}
	/* Return the cached attrs, overlaid with locally changed times. */
	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC)
			vaper->va_atime = np->n_atim;
		if (np->n_flag & NUPD)
			vaper->va_mtime = np->n_mtim;
	}
	mtx_unlock(&np->n_mtx);
#ifdef NFS_ACDEBUG
	mtx_unlock(&Giant);	/* nfs_printf() */
#endif
	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap);
	return (0);
}
867
/* Cookie returned for offset 0 (the start of the directory). */
static nfsuint64 nfs_nullcookie = { { 0, 0 } };
/*
 * This function finds the directory cookie that corresponds to the
 * logical byte offset given.  The cookies live in a list of nfsdmap
 * blocks (NFSNUMCOOKIES entries each) hung off the nfsnode; with add
 * set, missing blocks are allocated on the way.  Returns NULL when the
 * cookie is not cached and add is 0.  Caller must hold the dircookie
 * lock (see nfs_dircookie_lock()).
 */
nfsuint64 *
nfs_getcookie(struct nfsnode *np, off_t off, int add)
{
	struct nfsdmap *dp, *dp2;
	int pos;
	nfsuint64 *retval = NULL;

	pos = (uoff_t)off / NFS_DIRBLKSIZ;
	if (pos == 0 || off < 0) {
#ifdef DIAGNOSTIC
		if (add)
			panic("nfs getcookie add at <= 0");
#endif
		return (&nfs_nullcookie);
	}
	pos--;
	dp = LIST_FIRST(&np->n_cookies);
	if (!dp) {
		if (add) {
			dp = malloc(sizeof (struct nfsdmap),
				M_NFSDIROFF, M_WAITOK);
			dp->ndm_eocookie = 0;
			LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list);
		} else
			goto out;
	}
	/* Walk (and, if add, extend) the block list to the right block. */
	while (pos >= NFSNUMCOOKIES) {
		pos -= NFSNUMCOOKIES;
		if (LIST_NEXT(dp, ndm_list)) {
			if (!add && dp->ndm_eocookie < NFSNUMCOOKIES &&
			    pos >= dp->ndm_eocookie)
				goto out;
			dp = LIST_NEXT(dp, ndm_list);
		} else if (add) {
			dp2 = malloc(sizeof (struct nfsdmap),
				M_NFSDIROFF, M_WAITOK);
			dp2->ndm_eocookie = 0;
			LIST_INSERT_AFTER(dp, dp2, ndm_list);
			dp = dp2;
		} else
			goto out;
	}
	if (pos >= dp->ndm_eocookie) {
		if (add)
			dp->ndm_eocookie = pos + 1;
		else
			goto out;
	}
	retval = &dp->ndm_cookies[pos];
out:
	return (retval);
}
925
926/*
927 * Invalidate cached directory information, except for the actual directory
928 * blocks (which are invalidated separately).
929 * Done mainly to avoid the use of stale offset cookies.
930 */
931void
932nfs_invaldir(struct vnode *vp)
933{
934	struct nfsnode *np = VTONFS(vp);
935
936#ifdef DIAGNOSTIC
937	if (vp->v_type != VDIR)
938		panic("nfs: invaldir not dir");
939#endif
940	nfs_dircookie_lock(np);
941	np->n_direofoffset = 0;
942	np->n_cookieverf.nfsuquad[0] = 0;
943	np->n_cookieverf.nfsuquad[1] = 0;
944	if (LIST_FIRST(&np->n_cookies))
945		LIST_FIRST(&np->n_cookies)->ndm_eocookie = 0;
946	nfs_dircookie_unlock(np);
947}
948
949/*
950 * The write verifier has changed (probably due to a server reboot), so all
951 * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
952 * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
953 * and B_CLUSTEROK flags.  Once done the new write verifier can be set for the
954 * mount point.
955 *
956 * B_CLUSTEROK must be cleared along with B_NEEDCOMMIT because stage 1 data
957 * writes are not clusterable.
958 */
void
nfs_clearcommit(struct mount *mp)
{
	struct vnode *vp, *nvp;
	struct buf *bp, *nbp;
	struct bufobj *bo;

	MNT_ILOCK(mp);
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		bo = &vp->v_bufobj;
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			/* Vnode is being reclaimed; skip it. */
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Hold the vnode so it survives while we drop the mount
		 * interlock to work on its dirty buffer list.
		 */
		vholdl(vp);
		VI_UNLOCK(vp);
		MNT_IUNLOCK(mp);
		BO_LOCK(bo);
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (!BUF_ISLOCKED(bp) &&
			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
				== (B_DELWRI | B_NEEDCOMMIT))
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}
		BO_UNLOCK(bo);
		vdrop(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
}
990
991/*
992 * Helper functions for former macros.  Some of these should be
993 * moved to their callers.
994 */
995
/*
 * Decode a post-op file handle plus attributes from the reply stream:
 * optional (v3) handle-follows flag, the file handle itself, optional
 * (v3) attributes-follow flag, then the attributes.  On success *v holds
 * the vnode for the handle and *f is nonzero iff attributes were loaded.
 * Returns 0 or an errno (EBADRPC on a malformed reply).
 */
int
nfsm_mtofh_xx(struct vnode *d, struct vnode **v, int v3, int *f,
    struct mbuf **md, caddr_t *dpos)
{
	struct nfsnode *ttnp;
	struct vnode *ttvp;
	nfsfh_t *ttfhp;
	u_int32_t *tl;
	int ttfhsize;
	int t1;

	if (v3) {
		/* v3: "handle follows" boolean precedes the handle. */
		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		*f = fxdr_unsigned(int, *tl);
	} else
		*f = 1;
	if (*f) {
		t1 = nfsm_getfh_xx(&ttfhp, &ttfhsize, (v3), md, dpos);
		if (t1 != 0)
			return t1;
		/* Map the handle to an nfsnode/vnode (may create one). */
		t1 = nfs_nget(d->v_mount, ttfhp, ttfhsize, &ttnp, LK_EXCLUSIVE);
		if (t1 != 0)
			return t1;
		*v = NFSTOV(ttnp);
	}
	if (v3) {
		/* v3: "attributes follow" boolean. */
		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		if (*f)
			*f = fxdr_unsigned(int, *tl);
		else if (fxdr_unsigned(int, *tl))
			/* No vnode to load them into; just skip past. */
			nfsm_adv_xx(NFSX_V3FATTR, md, dpos);
	}
	if (*f) {
		ttvp = *v;
		t1 = nfs_loadattrcache(&ttvp, md, dpos, NULL, 0);
		if (t1)
			return t1;
		*v = ttvp;
	}
	return 0;
}
1041
1042int
1043nfsm_getfh_xx(nfsfh_t **f, int *s, int v3, struct mbuf **md, caddr_t *dpos)
1044{
1045	u_int32_t *tl;
1046
1047	if (v3) {
1048		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
1049		if (tl == NULL)
1050			return EBADRPC;
1051		*s = fxdr_unsigned(int, *tl);
1052		if (*s <= 0 || *s > NFSX_V3FHMAX)
1053			return EBADRPC;
1054	} else
1055		*s = NFSX_V2FH;
1056	*f = nfsm_dissect_xx(nfsm_rndup(*s), md, dpos);
1057	if (*f == NULL)
1058		return EBADRPC;
1059	else
1060		return 0;
1061}
1062
1063
1064int
1065nfsm_loadattr_xx(struct vnode **v, struct vattr *va, struct mbuf **md,
1066		 caddr_t *dpos)
1067{
1068	int t1;
1069
1070	struct vnode *ttvp = *v;
1071	t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 0);
1072	if (t1 != 0)
1073		return t1;
1074	*v = ttvp;
1075	return 0;
1076}
1077
1078int
1079nfsm_postop_attr_xx(struct vnode **v, int *f, struct mbuf **md,
1080		    caddr_t *dpos)
1081{
1082	u_int32_t *tl;
1083	int t1;
1084
1085	struct vnode *ttvp = *v;
1086	tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
1087	if (tl == NULL)
1088		return EBADRPC;
1089	*f = fxdr_unsigned(int, *tl);
1090	if (*f != 0) {
1091		t1 = nfs_loadattrcache(&ttvp, md, dpos, NULL, 1);
1092		if (t1 != 0) {
1093			*f = 0;
1094			return t1;
1095		}
1096		*v = ttvp;
1097	}
1098	return 0;
1099}
1100
/*
 * Decode v3 weak cache consistency (wcc) data: optional pre-op
 * attributes followed by post-op attributes.  On entry *f nonzero asks
 * for an mtime check; on return *f is nonzero iff the pre-op mtime
 * matched our cached mtime (or, when no check was requested, iff
 * post-op attributes were present).
 */
int
nfsm_wcc_data_xx(struct vnode **v, int *f, struct mbuf **md, caddr_t *dpos)
{
	u_int32_t *tl;
	int ttattrf, ttretf = 0;
	int t1;

	tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
	if (tl == NULL)
		return EBADRPC;
	if (*tl == nfs_true) {
		/* Pre-op attrs: size, mtime, ctime (6 words); mtime at +2. */
		tl = nfsm_dissect_xx(6 * NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		mtx_lock(&(VTONFS(*v))->n_mtx);
		if (*f)
 			ttretf = (VTONFS(*v)->n_mtime.tv_sec == fxdr_unsigned(u_int32_t, *(tl + 2)) &&
				  VTONFS(*v)->n_mtime.tv_nsec == fxdr_unsigned(u_int32_t, *(tl + 3)));
		mtx_unlock(&(VTONFS(*v))->n_mtx);
	}
	t1 = nfsm_postop_attr_xx(v, &ttattrf, md, dpos);
	if (t1)
		return t1;
	if (*f)
		*f = ttretf;
	else
		*f = ttattrf;
	return 0;
}
1130
1131int
1132nfsm_strtom_xx(const char *a, int s, int m, struct mbuf **mb, caddr_t *bpos)
1133{
1134	u_int32_t *tl;
1135	int t1;
1136
1137	if (s > m)
1138		return ENAMETOOLONG;
1139	t1 = nfsm_rndup(s) + NFSX_UNSIGNED;
1140	if (t1 <= M_TRAILINGSPACE(*mb)) {
1141		tl = nfsm_build_xx(t1, mb, bpos);
1142		*tl++ = txdr_unsigned(s);
1143		*(tl + ((t1 >> 2) - 2)) = 0;
1144		bcopy(a, tl, s);
1145	} else {
1146		t1 = nfsm_strtmbuf(mb, bpos, a, s);
1147		if (t1 != 0)
1148			return t1;
1149	}
1150	return 0;
1151}
1152
/*
 * Append the vnode's file handle to the request being built.  v3 handles
 * are variable length (length word + data + pad); v2 handles are a fixed
 * NFSX_V2FH bytes with no length word.
 */
int
nfsm_fhtom_xx(struct vnode *v, int v3, struct mbuf **mb, caddr_t *bpos)
{
	u_int32_t *tl;
	int t1;
	caddr_t cp;

	if (v3) {
		t1 = nfsm_rndup(VTONFS(v)->n_fhsize) + NFSX_UNSIGNED;
		if (t1 < M_TRAILINGSPACE(*mb)) {
			/* Fits in the current mbuf: emit length, pad, data. */
			tl = nfsm_build_xx(t1, mb, bpos);
			*tl++ = txdr_unsigned(VTONFS(v)->n_fhsize);
			*(tl + ((t1 >> 2) - 2)) = 0;	/* pre-zero pad word */
			bcopy(VTONFS(v)->n_fhp, tl, VTONFS(v)->n_fhsize);
		} else {
			/* Spill into additional mbufs. */
			t1 = nfsm_strtmbuf(mb, bpos,
			    (const char *)VTONFS(v)->n_fhp,
			    VTONFS(v)->n_fhsize);
			if (t1 != 0)
				return t1;
		}
	} else {
		cp = nfsm_build_xx(NFSX_V2FH, mb, bpos);
		bcopy(VTONFS(v)->n_fhp, cp, NFSX_V2FH);
	}
	return 0;
}
1180
/*
 * Encode a v3 sattr3 structure from *va into the request being built.
 * Each settable field is a discriminated union: nfs_true followed by the
 * value, or nfs_false alone for "don't change".  With full == 0 only the
 * mode and times are considered (used by e.g. SETATTR vs CREATE paths).
 * Fields equal to VNOVAL are encoded as "don't change".
 */
void
nfsm_v3attrbuild_xx(struct vattr *va, int full, struct mbuf **mb,
    caddr_t *bpos)
{
	u_int32_t *tl;

	/* mode */
	if (va->va_mode != (mode_t)VNOVAL) {
		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(va->va_mode);
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = nfs_false;
	}
	/* uid */
	if (full && va->va_uid != (uid_t)VNOVAL) {
		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(va->va_uid);
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = nfs_false;
	}
	/* gid */
	if (full && va->va_gid != (gid_t)VNOVAL) {
		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(va->va_gid);
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = nfs_false;
	}
	/* size (64-bit) */
	if (full && va->va_size != VNOVAL) {
		tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
		*tl++ = nfs_true;
		txdr_hyper(va->va_size, tl);
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = nfs_false;
	}
	/*
	 * atime: an explicit client time, "set to server time" when the
	 * caller asked for the current second, or "don't change".
	 */
	if (va->va_atime.tv_sec != VNOVAL) {
		if (va->va_atime.tv_sec != time_second) {
			tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
			txdr_nfsv3time(&va->va_atime, tl);
		} else {
			tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
		}
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
	}
	/* mtime: same three-way encoding as atime. */
	if (va->va_mtime.tv_sec != VNOVAL) {
		if (va->va_mtime.tv_sec != time_second) {
			tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
			txdr_nfsv3time(&va->va_mtime, tl);
		} else {
			tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
		}
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
	}
}
1246