nfs_subs.c revision 247502
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_subs.c  8.8 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/9/sys/nfsclient/nfs_subs.c 247502 2013-02-28 21:57:38Z jhb $");

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsnode.h>
#include <nfs/nfs_kdtrace.h>
#include <nfs/xdr_subs.h>
#include <nfsclient/nfsm_subs.h>
#include <nfsclient/nfsmount.h>

#include <netinet/in.h>

/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compilers.
 */
#include <machine/stdarg.h>

#ifdef KDTRACE_HOOKS
dtrace_nfsclient_attrcache_flush_probe_func_t
    dtrace_nfsclient_attrcache_flush_done_probe;
uint32_t nfsclient_attrcache_flush_done_id;

dtrace_nfsclient_attrcache_get_hit_probe_func_t
    dtrace_nfsclient_attrcache_get_hit_probe;
uint32_t nfsclient_attrcache_get_hit_id;

dtrace_nfsclient_attrcache_get_miss_probe_func_t
    dtrace_nfsclient_attrcache_get_miss_probe;
uint32_t nfsclient_attrcache_get_miss_id;

dtrace_nfsclient_attrcache_load_probe_func_t
    dtrace_nfsclient_attrcache_load_done_probe;
uint32_t nfsclient_attrcache_load_done_id;
#endif /* KDTRACE_HOOKS */

/*
 * Data items converted to xdr at startup, since they are constant.
 * This is kinda hokey, but may save a little time doing byte swaps.
 */
u_int32_t	nfs_xdrneg1;
u_int32_t	nfs_true, nfs_false;

/* And other global data */
static u_int32_t nfs_xid = 0;
static enum vtype nv2tov_type[8] = {
	VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON
};

int		nfs_ticks;
int		nfs_pbuf_freecnt = -1;	/* start out unlimited */

struct nfs_bufq	nfs_bufq;
static struct mtx nfs_xid_mtx;
struct task	nfs_nfsiodnew_task;

/*
 * and the reverse mapping from generic to Version 2 procedure numbers
 */
int nfsv2_procid[NFS_NPROCS] = {
	NFSV2PROC_NULL,
	NFSV2PROC_GETATTR,
	NFSV2PROC_SETATTR,
	NFSV2PROC_LOOKUP,
	NFSV2PROC_NOOP,
	NFSV2PROC_READLINK,
	NFSV2PROC_READ,
	NFSV2PROC_WRITE,
	NFSV2PROC_CREATE,
	NFSV2PROC_MKDIR,
	NFSV2PROC_SYMLINK,
	NFSV2PROC_CREATE,
	NFSV2PROC_REMOVE,
	NFSV2PROC_RMDIR,
	NFSV2PROC_RENAME,
	NFSV2PROC_LINK,
	NFSV2PROC_READDIR,
	NFSV2PROC_NOOP,
	NFSV2PROC_STATFS,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
};

LIST_HEAD(nfsnodehashhead, nfsnode);

u_int32_t
nfs_xid_gen(void)
{
	uint32_t xid;

	mtx_lock(&nfs_xid_mtx);

	/* Get a pretty random xid to start with */
	if (!nfs_xid)
		nfs_xid = random();
	/*
	 * Skip zero xid if it should ever happen.
	 */
	if (++nfs_xid == 0)
		nfs_xid++;
	xid = nfs_xid;
	mtx_unlock(&nfs_xid_mtx);
	return xid;
}
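
/*
 * For illustration: nfs_xid_gen() never hands out a zero xid.  If
 * nfs_xid were 0xffffffff, the pre-increment in nfs_xid_gen() would
 * wrap it to 0 and the check would bump it to 1, so consecutive calls
 * yield 1, 2, ... rather than the reserved zero value.
 */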

/*
 * Create the header for an rpc request packet
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 */
struct mbuf *
nfsm_reqhead(struct vnode *vp, u_long procid, int hsiz)
{
	struct mbuf *mb;

	MGET(mb, M_WAIT, MT_DATA);
	if (hsiz >= MINCLSIZE)
		MCLGET(mb, M_WAIT);
	mb->m_len = 0;
	return (mb);
}
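
/*
 * A minimal, hypothetical usage sketch for nfsm_reqhead(): an nfs op
 * function grabs the first mbuf of a request and then appends XDR data
 * to it.  Only nfsm_reqhead(), NFSPROC_GETATTR and NFSX_FH() below are
 * real; the surrounding fragment is illustrative.
 */
#if 0
	struct mbuf *mreq, *mb;
	caddr_t bpos;

	mb = mreq = nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
	bpos = mtod(mb, caddr_t);
	/* ... nfsm_fhtom_xx(vp, v3, &mb, &bpos) would append the handle ... */
#endif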

/*
 * Copies a uio scatter/gather list to an mbuf chain.
 * NOTE: can only handle iovcnt == 1
 */
int
nfsm_uiotombuf(struct uio *uiop, struct mbuf **mq, int siz, caddr_t *bpos)
{
	char *uiocp;
	struct mbuf *mp, *mp2;
	int xfer, left, mlen;
	int uiosiz, clflg, rem;
	char *cp;

	KASSERT(uiop->uio_iovcnt == 1, ("nfsm_uiotombuf: iovcnt != 1"));

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz) - siz;
	mp = mp2 = *mq;
	while (siz > 0) {
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			mlen = M_TRAILINGSPACE(mp);
			if (mlen == 0) {
				MGET(mp, M_WAIT, MT_DATA);
				if (clflg)
					MCLGET(mp, M_WAIT);
				mp->m_len = 0;
				mp2->m_next = mp;
				mp2 = mp;
				mlen = M_TRAILINGSPACE(mp);
			}
			xfer = (left > mlen) ? mlen : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(uiocp, mtod(mp, caddr_t) + mp->m_len, xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t) + mp->m_len,
				    xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t) + mp->m_len,
				    xfer);
			mp->m_len += xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		uiop->uio_iov->iov_base =
		    (char *)uiop->uio_iov->iov_base + uiosiz;
		uiop->uio_iov->iov_len -= uiosiz;
		siz -= uiosiz;
	}
	if (rem > 0) {
		if (rem > M_TRAILINGSPACE(mp)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t) + mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t) + mp->m_len;
	*mq = mp;
	return (0);
}
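
/*
 * Worked example of the padding logic above: XDR requires opaque data
 * to end on a 32-bit boundary, and nfsm_rndup() rounds a length up to
 * the next multiple of four.  For siz = 5, nfsm_rndup(5) = 8, so
 * rem = 3 and three zero bytes are appended after the copied data.
 */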

/*
 * Copy a string into mbufs for the hard cases...
 */
int
nfsm_strtmbuf(struct mbuf **mb, char **bpos, const char *cp, long siz)
{
	struct mbuf *m1 = NULL, *m2;
	long left, xfer, len, tlen;
	u_int32_t *tl;
	int putsize;

	putsize = 1;
	m2 = *mb;
	left = M_TRAILINGSPACE(m2);
	if (left > 0) {
		tl = ((u_int32_t *)(*bpos));
		*tl++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			bcopy(cp, (caddr_t) tl, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		tl = mtod(m1, u_int32_t *);
		tlen = 0;
		if (putsize) {
			*tl++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(tl + (xfer >> 2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) tl, xfer);
		m1->m_len = len + tlen;
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t) + m1->m_len;
	return (0);
}

/*
 * Called once to initialize data structures...
 */
int
nfs_init(struct vfsconf *vfsp)
{
	int i;

	nfsmount_zone = uma_zcreate("NFSMOUNT", sizeof(struct nfsmount),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	nfs_true = txdr_unsigned(TRUE);
	nfs_false = txdr_unsigned(FALSE);
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
	if (nfs_ticks < 1)
		nfs_ticks = 1;
	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		nfs_iodwant[i] = NFSIOD_NOT_AVAILABLE;
		nfs_iodmount[i] = NULL;
	}
	nfs_nhinit();			/* Init the nfsnode table */

	/*
	 * Initialize reply list and start timer
	 */
	mtx_init(&nfs_iod_mtx, "NFS iod lock", NULL, MTX_DEF);
	mtx_init(&nfs_xid_mtx, "NFS xid lock", NULL, MTX_DEF);
	TASK_INIT(&nfs_nfsiodnew_task, 0, nfs_nfsiodnew_tq, NULL);

	nfs_pbuf_freecnt = nswbuf / 2 + 1;

	return (0);
}
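
/*
 * For illustration, the nfs_ticks computation in nfs_init() converts
 * NFS_TICKINTVL from milliseconds into clock ticks, rounding to the
 * nearest tick.  With hypothetical values hz = 1000 and
 * NFS_TICKINTVL = 500, nfs_ticks = (1000 * 500 + 500) / 1000 = 500;
 * the clamp guarantees at least one tick for very small hz.
 */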

int
nfs_uninit(struct vfsconf *vfsp)
{
	int i;

	/*
	 * Tell all nfsiod processes to exit. Clear nfs_iodmax, and wakeup
	 * any sleeping nfsiods so they check nfs_iodmax and exit.
	 * Drain nfsiodnew task before we wait for them to finish.
	 */
	mtx_lock(&nfs_iod_mtx);
	nfs_iodmax = 0;
	mtx_unlock(&nfs_iod_mtx);
	taskqueue_drain(taskqueue_thread, &nfs_nfsiodnew_task);
	mtx_lock(&nfs_iod_mtx);
	for (i = 0; i < nfs_numasync; i++)
		if (nfs_iodwant[i] == NFSIOD_AVAILABLE)
			wakeup(&nfs_iodwant[i]);
	/* The last nfsiod to exit will wake us up when nfs_numasync hits 0 */
	while (nfs_numasync)
		msleep(&nfs_numasync, &nfs_iod_mtx, PWAIT, "ioddie", 0);
	mtx_unlock(&nfs_iod_mtx);
	nfs_nhuninit();
	uma_zdestroy(nfsmount_zone);
	return (0);
}

void
nfs_dircookie_lock(struct nfsnode *np)
{
	mtx_lock(&np->n_mtx);
	while (np->n_flag & NDIRCOOKIELK)
		(void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0);
	np->n_flag |= NDIRCOOKIELK;
	mtx_unlock(&np->n_mtx);
}

void
nfs_dircookie_unlock(struct nfsnode *np)
{
	mtx_lock(&np->n_mtx);
	np->n_flag &= ~NDIRCOOKIELK;
	wakeup(&np->n_flag);
	mtx_unlock(&np->n_mtx);
}

int
nfs_upgrade_vnlock(struct vnode *vp)
{
	int old_lock;

	ASSERT_VOP_LOCKED(vp, "nfs_upgrade_vnlock");
	old_lock = VOP_ISLOCKED(vp);
	if (old_lock != LK_EXCLUSIVE) {
		KASSERT(old_lock == LK_SHARED,
		    ("nfs_upgrade_vnlock: wrong old_lock %d", old_lock));
		/* Upgrade to exclusive lock, this might block */
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	}
	return (old_lock);
}

void
nfs_downgrade_vnlock(struct vnode *vp, int old_lock)
{
	if (old_lock != LK_EXCLUSIVE) {
		KASSERT(old_lock == LK_SHARED, ("wrong old_lock %d", old_lock));
		/* Downgrade from exclusive lock. */
		vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
	}
}

void
nfs_printf(const char *fmt, ...)
{
	va_list ap;

	mtx_lock(&Giant);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	mtx_unlock(&Giant);
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and, iff vaper is not NULL, copy the
 * attributes to *vaper as well.
 */
int
nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
		  struct vattr *vaper, int dontshrink)
{
	struct vnode *vp = *vpp;
	struct vattr *vap;
	struct nfs_fattr *fp;
	struct nfsnode *np = NULL;
	int32_t t1;
	caddr_t cp2;
	int rdev;
	struct mbuf *md;
	enum vtype vtyp;
	u_short vmode;
	struct timespec mtime, mtime_save;
	int v3 = NFS_ISV3(vp);
	int error = 0;

	md = *mdp;
	t1 = (mtod(md, caddr_t) + md->m_len) - *dposp;
	cp2 = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, M_WAIT);
	if (cp2 == NULL) {
		error = EBADRPC;
		goto out;
	}
	fp = (struct nfs_fattr *)cp2;
	if (v3) {
		vtyp = nfsv3tov_type(fp->fa_type);
		vmode = fxdr_unsigned(u_short, fp->fa_mode);
		rdev = makedev(fxdr_unsigned(int, fp->fa3_rdev.specdata1),
			fxdr_unsigned(int, fp->fa3_rdev.specdata2));
		fxdr_nfsv3time(&fp->fa3_mtime, &mtime);
	} else {
		vtyp = nfsv2tov_type(fp->fa_type);
		vmode = fxdr_unsigned(u_short, fp->fa_mode);
		/*
		 * XXX
		 *
		 * The duplicate information returned in fa_type and fa_mode
		 * is an ambiguity in the NFS version 2 protocol.
		 *
		 * VREG should be taken literally as a regular file.  If a
		 * server intends to return some type information differently
		 * in the upper bits of the mode field (e.g. for sockets, or
		 * FIFOs), NFSv2 mandates fa_type to be VNON.  Anyway, we
		 * leave the examination of the mode bits even in the VREG
		 * case to avoid breakage for bogus servers, but we make sure
		 * that there are actually type bits set in the upper part of
		 * fa_mode (and failing that, trust the va_type field).
		 *
		 * NFSv3 cleared the issue, and requires fa_mode to not
		 * contain any type information (while also introducing
		 * sockets and FIFOs for fa_type).
		 */
		if (vtyp == VNON || (vtyp == VREG && (vmode & S_IFMT) != 0))
			vtyp = IFTOVT(vmode);
		rdev = fxdr_unsigned(int32_t, fp->fa2_rdev);
		fxdr_nfsv2time(&fp->fa2_mtime, &mtime);

		/*
		 * Really ugly NFSv2 kludge.
		 */
		if (vtyp == VCHR && rdev == 0xffffffff)
			vtyp = VFIFO;
	}

	/*
	 * If v_type differs from the type just received (as it will for
	 * a freshly allocated vnode, whose type starts out as VNON),
	 * this is a new node, so fill in the v_type and n_mtime fields
	 * and switch FIFOs to the fifo vnode ops.
	 */
	np = VTONFS(vp);
	mtx_lock(&np->n_mtx);
	if (vp->v_type != vtyp) {
		vp->v_type = vtyp;
		if (vp->v_type == VFIFO)
			vp->v_op = &nfs_fifoops;
		np->n_mtime = mtime;
	}
	vap = &np->n_vattr;
	vap->va_type = vtyp;
	vap->va_mode = (vmode & 07777);
	vap->va_rdev = rdev;
	mtime_save = vap->va_mtime;
	vap->va_mtime = mtime;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	if (v3) {
		vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
		vap->va_size = fxdr_hyper(&fp->fa3_size);
		vap->va_blocksize = NFS_FABLKSIZE;
		vap->va_bytes = fxdr_hyper(&fp->fa3_used);
		vap->va_fileid = fxdr_unsigned(int32_t,
		    fp->fa3_fileid.nfsuquad[1]);
		fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime);
		fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime);
		vap->va_flags = 0;
		vap->va_filerev = 0;
	} else {
		vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
		vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
		vap->va_blocksize = fxdr_unsigned(int32_t, fp->fa2_blocksize);
		vap->va_bytes = (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks)
		    * NFS_FABLKSIZE;
		vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid);
		fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime);
		vap->va_flags = 0;
		vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t,
		    fp->fa2_ctime.nfsv2_sec);
		vap->va_ctime.tv_nsec = 0;
		vap->va_gen = fxdr_unsigned(u_int32_t, fp->fa2_ctime.nfsv2_usec);
		vap->va_filerev = 0;
	}
	np->n_attrstamp = time_second;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (dontshrink && vap->va_size < np->n_size) {
				/*
				 * We've been told not to shrink the file;
				 * zero np->n_attrstamp to indicate that
				 * the attributes are stale.
				 */
				vap->va_size = np->n_size;
				np->n_attrstamp = 0;
				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			} else if (np->n_flag & NMODIFIED) {
				/*
				 * We've modified the file: Use the larger
				 * of our size, and the server's size.
				 */
				if (vap->va_size < np->n_size) {
					vap->va_size = np->n_size;
				} else {
					np->n_size = vap->va_size;
					np->n_flag |= NSIZECHANGED;
				}
			} else {
				np->n_size = vap->va_size;
				np->n_flag |= NSIZECHANGED;
			}
			vnode_pager_setsize(vp, np->n_size);
		} else {
			np->n_size = vap->va_size;
		}
	}
	/*
	 * The following check is added to prevent a race between (say)
	 * a READDIR+ and a WRITE:
	 * READDIR+ and WRITE requests are sent out.
	 * The READDIR+ and WRITE responses are received on the client,
	 * but the WRITE response is handled before the READDIR+ response,
	 * causing the post-op attrs from the WRITE to be loaded first
	 * and the attrs from the READDIR+ to be loaded later.  If this
	 * happens, stale attrs are loaded into the attrcache.
	 * We detect this by checking for the mtime moving backwards, and
	 * invalidate the attrcache when it does.
	 */
	if (timespeccmp(&mtime_save, &vap->va_mtime, >)) {
		/* mtime went backwards; the cached attrs are stale */
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	}
	if (vaper != NULL) {
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC)
				vaper->va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vaper->va_mtime = np->n_mtim;
		}
	}

#ifdef KDTRACE_HOOKS
	if (np->n_attrstamp != 0)
		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, &np->n_vattr, 0);
#endif
	mtx_unlock(&np->n_mtx);
out:
#ifdef KDTRACE_HOOKS
	if (error)
		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, NULL, error);
#endif
	return (error);
}
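
/*
 * A worked example of the NFSv2 type deduction in nfs_loadattrcache():
 * assume a (hypothetical) server reports fa_type = NFNON and
 * fa_mode = 0140755, i.e. S_IFSOCK | 0755.  nfsv2tov_type() maps NFNON
 * to VNON, so the IFTOVT(vmode) fallback runs and derives VSOCK from
 * the S_IFMT bits.  Likewise a VCHR node whose rdev is the all-ones
 * value 0xffffffff is remapped to VFIFO by the kludge above.
 */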

#ifdef NFS_ACDEBUG
#include <sys/sysctl.h>
SYSCTL_DECL(_vfs_oldnfs);
static int nfs_acdebug;
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0,
    "Toggle acdebug (attribute cache debug) flag");
#endif

/*
 * Check the time stamp.
 * If the cache is valid, copy the contents to *vaper and return 0;
 * otherwise return an error.
 */
int
nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
{
	struct nfsnode *np;
	struct vattr *vap;
	struct nfsmount *nmp;
	int timeo;

	np = VTONFS(vp);
	vap = &np->n_vattr;
	nmp = VFSTONFS(vp->v_mount);
#ifdef NFS_ACDEBUG
	mtx_lock(&Giant);	/* nfs_printf() */
#endif
	mtx_lock(&np->n_mtx);
	/* XXX n_mtime doesn't seem to be updated on a miss-and-reload */
	timeo = (time_second - np->n_mtime.tv_sec) / 10;

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 1)
		nfs_printf("nfs_getattrcache: initial timeo = %d\n", timeo);
#endif

	if (vap->va_type == VDIR) {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin)
			timeo = nmp->nm_acdirmin;
		else if (timeo > nmp->nm_acdirmax)
			timeo = nmp->nm_acdirmax;
	} else {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin)
			timeo = nmp->nm_acregmin;
		else if (timeo > nmp->nm_acregmax)
			timeo = nmp->nm_acregmax;
	}

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 2)
		nfs_printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n",
			   nmp->nm_acregmin, nmp->nm_acregmax,
			   nmp->nm_acdirmin, nmp->nm_acdirmax);

	if (nfs_acdebug)
		nfs_printf("nfs_getattrcache: age = %d; final timeo = %d\n",
			   (time_second - np->n_attrstamp), timeo);
#endif

	if ((time_second - np->n_attrstamp) >= timeo) {
		nfsstats.attrcache_misses++;
		mtx_unlock(&np->n_mtx);
#ifdef NFS_ACDEBUG
		mtx_unlock(&Giant);	/* nfs_printf() */
#endif
		KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);
		return (ENOENT);
	}
	nfsstats.attrcache_hits++;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else {
				np->n_size = vap->va_size;
			}
			vnode_pager_setsize(vp, np->n_size);
		} else {
			np->n_size = vap->va_size;
		}
	}
	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC)
			vaper->va_atime = np->n_atim;
		if (np->n_flag & NUPD)
			vaper->va_mtime = np->n_mtim;
	}
	mtx_unlock(&np->n_mtx);
#ifdef NFS_ACDEBUG
	mtx_unlock(&Giant);	/* nfs_printf() */
#endif
	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap);
	return (0);
}
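
/*
 * For illustration, consider a regular file whose mtime is 100 seconds
 * old: timeo starts out as 100 / 10 = 10 seconds.  Assuming the
 * (hypothetical) mount options acregmin=3 and acregmax=60, 10 already
 * lies inside the clamp, so cached attributes younger than 10 seconds
 * count as a hit and older ones are reported as a miss (ENOENT).
 */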

/*
 * Purge all cached information about an NFS vnode including name
 * cache entries, the attribute cache, and the access cache.  This is
 * called when an NFS request for a node fails with a stale
 * filehandle.
 */
void
nfs_purgecache(struct vnode *vp)
{
	struct nfsnode *np;
	int i;

	np = VTONFS(vp);
	cache_purge(vp);
	mtx_lock(&np->n_mtx);
	np->n_attrstamp = 0;
	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
		np->n_accesscache[i].stamp = 0;
	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
	mtx_unlock(&np->n_mtx);
}

static nfsuint64 nfs_nullcookie = { { 0, 0 } };
/*
 * This function finds the directory cookie that corresponds to the
 * logical byte offset given.
 */
nfsuint64 *
nfs_getcookie(struct nfsnode *np, off_t off, int add)
{
	struct nfsdmap *dp, *dp2;
	int pos;
	nfsuint64 *retval = NULL;

	pos = (uoff_t)off / NFS_DIRBLKSIZ;
	if (pos == 0 || off < 0) {
		KASSERT(!add, ("nfs getcookie add at <= 0"));
		return (&nfs_nullcookie);
	}
	pos--;
	dp = LIST_FIRST(&np->n_cookies);
	if (!dp) {
		if (add) {
			dp = malloc(sizeof (struct nfsdmap),
				M_NFSDIROFF, M_WAITOK);
			dp->ndm_eocookie = 0;
			LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list);
		} else
			goto out;
	}
	while (pos >= NFSNUMCOOKIES) {
		pos -= NFSNUMCOOKIES;
		if (LIST_NEXT(dp, ndm_list)) {
			if (!add && dp->ndm_eocookie < NFSNUMCOOKIES &&
			    pos >= dp->ndm_eocookie)
				goto out;
			dp = LIST_NEXT(dp, ndm_list);
		} else if (add) {
			dp2 = malloc(sizeof (struct nfsdmap),
				M_NFSDIROFF, M_WAITOK);
			dp2->ndm_eocookie = 0;
			LIST_INSERT_AFTER(dp, dp2, ndm_list);
			dp = dp2;
		} else
			goto out;
	}
	if (pos >= dp->ndm_eocookie) {
		if (add)
			dp->ndm_eocookie = pos + 1;
		else
			goto out;
	}
	retval = &dp->ndm_cookies[pos];
out:
	return (retval);
}
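
/*
 * Worked example for nfs_getcookie(): with off = 2 * NFS_DIRBLKSIZ,
 * pos starts at 2 and is decremented to 1, selecting the second cookie
 * slot of the first nfsdmap block (offset 0 needs no stored cookie,
 * hence nfs_nullcookie).  Only once pos reaches NFSNUMCOOKIES does the
 * walk advance to the next block on the n_cookies list.
 */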

/*
 * Invalidate cached directory information, except for the actual directory
 * blocks (which are invalidated separately).
 * Done mainly to avoid the use of stale offset cookies.
 */
void
nfs_invaldir(struct vnode *vp)
{
	struct nfsnode *np = VTONFS(vp);

	KASSERT(vp->v_type == VDIR, ("nfs: invaldir not dir"));
	nfs_dircookie_lock(np);
	np->n_direofoffset = 0;
	np->n_cookieverf.nfsuquad[0] = 0;
	np->n_cookieverf.nfsuquad[1] = 0;
	if (LIST_FIRST(&np->n_cookies))
		LIST_FIRST(&np->n_cookies)->ndm_eocookie = 0;
	nfs_dircookie_unlock(np);
}

/*
 * The write verifier has changed (probably due to a server reboot), so all
 * B_NEEDCOMMIT blocks will have to be written again. Since they are on the
 * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
 * and B_CLUSTEROK flags.  Once done the new write verifier can be set for the
 * mount point.
 *
 * B_CLUSTEROK must be cleared along with B_NEEDCOMMIT because stage 1 data
 * writes are not clusterable.
 */
void
nfs_clearcommit(struct mount *mp)
{
	struct vnode *vp, *nvp;
	struct buf *bp, *nbp;
	struct bufobj *bo;

	MNT_VNODE_FOREACH_ALL(vp, mp, nvp) {
		bo = &vp->v_bufobj;
		vholdl(vp);
		VI_UNLOCK(vp);
		BO_LOCK(bo);
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (!BUF_ISLOCKED(bp) &&
			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
				== (B_DELWRI | B_NEEDCOMMIT))
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}
		BO_UNLOCK(bo);
		vdrop(vp);
	}
}

/*
 * Helper functions for former macros.  Some of these should be
 * moved to their callers.
 */

int
nfsm_mtofh_xx(struct vnode *d, struct vnode **v, int v3, int *f,
    struct mbuf **md, caddr_t *dpos)
{
	struct nfsnode *ttnp;
	struct vnode *ttvp;
	nfsfh_t *ttfhp;
	u_int32_t *tl;
	int ttfhsize;
	int t1;

	if (v3) {
		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		*f = fxdr_unsigned(int, *tl);
	} else
		*f = 1;
	if (*f) {
		t1 = nfsm_getfh_xx(&ttfhp, &ttfhsize, v3, md, dpos);
		if (t1 != 0)
			return t1;
		t1 = nfs_nget(d->v_mount, ttfhp, ttfhsize, &ttnp, LK_EXCLUSIVE);
		if (t1 != 0)
			return t1;
		*v = NFSTOV(ttnp);
	}
	if (v3) {
		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		if (*f)
			*f = fxdr_unsigned(int, *tl);
		else if (fxdr_unsigned(int, *tl))
			nfsm_adv_xx(NFSX_V3FATTR, md, dpos);
	}
	if (*f) {
		ttvp = *v;
		t1 = nfs_loadattrcache(&ttvp, md, dpos, NULL, 0);
		if (t1)
			return t1;
		*v = ttvp;
	}
	return 0;
}

int
nfsm_getfh_xx(nfsfh_t **f, int *s, int v3, struct mbuf **md, caddr_t *dpos)
{
	u_int32_t *tl;

	if (v3) {
		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		*s = fxdr_unsigned(int, *tl);
		if (*s <= 0 || *s > NFSX_V3FHMAX)
			return EBADRPC;
	} else
		*s = NFSX_V2FH;
	*f = nfsm_dissect_xx(nfsm_rndup(*s), md, dpos);
	if (*f == NULL)
		return EBADRPC;
	else
		return 0;
}

int
nfsm_loadattr_xx(struct vnode **v, struct vattr *va, struct mbuf **md,
		 caddr_t *dpos)
{
	struct vnode *ttvp = *v;
	int t1;

	t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 0);
	if (t1 != 0)
		return t1;
	*v = ttvp;
	return 0;
}

int
nfsm_postop_attr_xx(struct vnode **v, int *f, struct vattr *va,
		    struct mbuf **md, caddr_t *dpos)
{
	struct vnode *ttvp = *v;
	u_int32_t *tl;
	int t1;

	tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
	if (tl == NULL)
		return EBADRPC;
	*f = fxdr_unsigned(int, *tl);
	if (*f != 0) {
		t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 1);
		if (t1 != 0) {
			*f = 0;
			return t1;
		}
		*v = ttvp;
	}
	return 0;
}

int
nfsm_wcc_data_xx(struct vnode **v, int *f, struct mbuf **md, caddr_t *dpos)
{
	u_int32_t *tl;
	int ttattrf, ttretf = 0;
	int t1;

	tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
	if (tl == NULL)
		return EBADRPC;
	if (*tl == nfs_true) {
		tl = nfsm_dissect_xx(6 * NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		mtx_lock(&(VTONFS(*v))->n_mtx);
		if (*f)
			ttretf = (VTONFS(*v)->n_mtime.tv_sec ==
			    fxdr_unsigned(u_int32_t, *(tl + 2)) &&
			    VTONFS(*v)->n_mtime.tv_nsec ==
			    fxdr_unsigned(u_int32_t, *(tl + 3)));
		mtx_unlock(&(VTONFS(*v))->n_mtx);
	}
	t1 = nfsm_postop_attr_xx(v, &ttattrf, NULL, md, dpos);
	if (t1)
		return t1;
	if (*f)
		*f = ttretf;
	else
		*f = ttattrf;
	return 0;
}

int
nfsm_strtom_xx(const char *a, int s, int m, struct mbuf **mb, caddr_t *bpos)
{
	u_int32_t *tl;
	int t1;

	if (s > m)
		return ENAMETOOLONG;
	t1 = nfsm_rndup(s) + NFSX_UNSIGNED;
	if (t1 <= M_TRAILINGSPACE(*mb)) {
		tl = nfsm_build_xx(t1, mb, bpos);
		*tl++ = txdr_unsigned(s);
		*(tl + ((t1 >> 2) - 2)) = 0;
		bcopy(a, tl, s);
	} else {
		t1 = nfsm_strtmbuf(mb, bpos, a, s);
		if (t1 != 0)
			return t1;
	}
	return 0;
}

int
nfsm_fhtom_xx(struct vnode *v, int v3, struct mbuf **mb, caddr_t *bpos)
{
	u_int32_t *tl;
	int t1;
	caddr_t cp;

	if (v3) {
		t1 = nfsm_rndup(VTONFS(v)->n_fhsize) + NFSX_UNSIGNED;
		if (t1 < M_TRAILINGSPACE(*mb)) {
			tl = nfsm_build_xx(t1, mb, bpos);
			*tl++ = txdr_unsigned(VTONFS(v)->n_fhsize);
			*(tl + ((t1 >> 2) - 2)) = 0;
			bcopy(VTONFS(v)->n_fhp, tl, VTONFS(v)->n_fhsize);
		} else {
			t1 = nfsm_strtmbuf(mb, bpos,
			    (const char *)VTONFS(v)->n_fhp,
			    VTONFS(v)->n_fhsize);
			if (t1 != 0)
				return t1;
		}
	} else {
		cp = nfsm_build_xx(NFSX_V2FH, mb, bpos);
		bcopy(VTONFS(v)->n_fhp, cp, NFSX_V2FH);
	}
	return 0;
}

1075nfsm_v3attrbuild_xx(struct vattr *va, int full, struct mbuf **mb,
1076    caddr_t *bpos)
1077{
1078	u_int32_t *tl;
1079
1080	if (va->va_mode != (mode_t)VNOVAL) {
1081		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
1082		*tl++ = nfs_true;
1083		*tl = txdr_unsigned(va->va_mode);
1084	} else {
1085		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
1086		*tl = nfs_false;
1087	}
1088	if (full && va->va_uid != (uid_t)VNOVAL) {
1089		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
1090		*tl++ = nfs_true;
1091		*tl = txdr_unsigned(va->va_uid);
1092	} else {
1093		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
1094		*tl = nfs_false;
1095	}
1096	if (full && va->va_gid != (gid_t)VNOVAL) {
1097		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
1098		*tl++ = nfs_true;
1099		*tl = txdr_unsigned(va->va_gid);
1100	} else {
1101		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
1102		*tl = nfs_false;
1103	}
1104	if (full && va->va_size != VNOVAL) {
1105		tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
1106		*tl++ = nfs_true;
1107		txdr_hyper(va->va_size, tl);
1108	} else {
1109		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
1110		*tl = nfs_false;
1111	}
1112	if (va->va_atime.tv_sec != VNOVAL) {
1113		if ((va->va_vaflags & VA_UTIMES_NULL) == 0) {
1114			tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
1115			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
1116			txdr_nfsv3time(&va->va_atime, tl);
1117		} else {
1118			tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
1119			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
1120		}
1121	} else {
1122		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
1123		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
1124	}
1125	if (va->va_mtime.tv_sec != VNOVAL) {
1126		if ((va->va_vaflags & VA_UTIMES_NULL) == 0) {
1127			tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
1128			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
1129			txdr_nfsv3time(&va->va_mtime, tl);
1130		} else {
1131			tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
1132			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
1133		}
1134	} else {
1135		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
1136		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
1137	}
1138}
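
/*
 * Illustrative wire layout for nfsm_v3attrbuild_xx(): for a vattr with
 * only va_mode = 0644 set (all other fields VNOVAL) and full == 0, the
 * function emits, in XDR words:
 *
 *	TRUE, 0644			mode
 *	FALSE, FALSE, FALSE		uid, gid and size omitted
 *	DONTCHANGE, DONTCHANGE		atime and mtime untouched
 *
 * which is the NFSv3 sattr3 structure with every optional field but
 * the mode switched off.
 */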