nfs_clport.c revision 331722
1/*-
2 * Copyright (c) 1989, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: stable/11/sys/fs/nfsclient/nfs_clport.c 331722 2018-03-29 02:50:57Z eadler $");
36
37#include "opt_inet.h"
38#include "opt_inet6.h"
39
40#include <sys/capsicum.h>
41
42/*
43 * generally, I don't like #includes inside .h files, but it seems to
44 * be the easiest way to handle the port.
45 */
46#include <sys/fail.h>
47#include <sys/hash.h>
48#include <sys/sysctl.h>
49#include <fs/nfs/nfsport.h>
50#include <netinet/in_fib.h>
51#include <netinet/if_ether.h>
52#include <netinet6/ip6_var.h>
53#include <net/if_types.h>
54
55#include <fs/nfsclient/nfs_kdtrace.h>
56
57#ifdef KDTRACE_HOOKS
58dtrace_nfsclient_attrcache_flush_probe_func_t
59		dtrace_nfscl_attrcache_flush_done_probe;
60uint32_t	nfscl_attrcache_flush_done_id;
61
62dtrace_nfsclient_attrcache_get_hit_probe_func_t
63		dtrace_nfscl_attrcache_get_hit_probe;
64uint32_t	nfscl_attrcache_get_hit_id;
65
66dtrace_nfsclient_attrcache_get_miss_probe_func_t
67		dtrace_nfscl_attrcache_get_miss_probe;
68uint32_t	nfscl_attrcache_get_miss_id;
69
70dtrace_nfsclient_attrcache_load_probe_func_t
71		dtrace_nfscl_attrcache_load_done_probe;
72uint32_t	nfscl_attrcache_load_done_id;
73#endif /* KDTRACE_HOOKS */
74
75extern u_int32_t newnfs_true, newnfs_false, newnfs_xdrneg1;
76extern struct vop_vector newnfs_vnodeops;
77extern struct vop_vector newnfs_fifoops;
78extern uma_zone_t newnfsnode_zone;
79extern struct buf_ops buf_ops_newnfs;
80extern int ncl_pbuf_freecnt;
81extern short nfsv4_cbport;
82extern int nfscl_enablecallb;
83extern int nfs_numnfscbd;
84extern int nfscl_inited;
85struct mtx ncl_iod_mutex;
86NFSDLOCKMUTEX;
87
88extern void (*ncl_call_invalcaches)(struct vnode *);
89
90SYSCTL_DECL(_vfs_nfs);
91static int ncl_fileid_maxwarnings = 10;
92SYSCTL_INT(_vfs_nfs, OID_AUTO, fileid_maxwarnings, CTLFLAG_RWTUN,
93    &ncl_fileid_maxwarnings, 0,
94    "Limit fileid corruption warnings; 0 is off; -1 is unlimited");
95static volatile int ncl_fileid_nwarnings;
96
97static void nfscl_warn_fileid(struct nfsmount *, struct nfsvattr *,
98    struct nfsvattr *);
99
100/*
101 * Comparison function for vfs_hash functions.
102 */
103int
104newnfs_vncmpf(struct vnode *vp, void *arg)
105{
106	struct nfsfh *nfhp = (struct nfsfh *)arg;
107	struct nfsnode *np = VTONFS(vp);
108
109	if (np->n_fhp->nfh_len != nfhp->nfh_len ||
110	    NFSBCMP(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len))
111		return (1);
112	return (0);
113}
114
115/*
116 * Look up a vnode/nfsnode by file handle.
117 * Callers must check for mount points!!
118 * On success, a pointer to the
119 * nfsnode structure is returned via *npp.
120 * This variant takes a "struct nfsfh *" as its third argument and consumes
121 * that structure, either by hanging it off the nfsnode or by FREEing it.
122 */
123int
124nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
125    struct componentname *cnp, struct thread *td, struct nfsnode **npp,
126    void *stuff, int lkflags)
127{
128	struct nfsnode *np, *dnp;
129	struct vnode *vp, *nvp;
130	struct nfsv4node *newd, *oldd;
131	int error;
132	u_int hash;
133	struct nfsmount *nmp;
134
135	nmp = VFSTONFS(mntp);
136	dnp = VTONFS(dvp);
137	*npp = NULL;
138
139	hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT);
140
141	error = vfs_hash_get(mntp, hash, lkflags,
142	    td, &nvp, newnfs_vncmpf, nfhp);
143	if (error == 0 && nvp != NULL) {
144		/*
145		 * I believe there is a slight chance that vgonel() could
146		 * get called on this vnode between when NFSVOPLOCK() drops
147		 * the VI_LOCK() and vget() acquires it again, so that it
148		 * hasn't yet had v_usecount incremented. If this were to
149		 * happen, the VI_DOOMED flag would be set, so check for
150		 * that here. Since we now have the v_usecount incremented,
151		 * we should be ok until we vrele() it, if the VI_DOOMED
152		 * flag isn't set now.
153		 */
154		VI_LOCK(nvp);
155		if ((nvp->v_iflag & VI_DOOMED)) {
156			VI_UNLOCK(nvp);
157			vrele(nvp);
158			error = ENOENT;
159		} else {
160			VI_UNLOCK(nvp);
161		}
162	}
163	if (error) {
164		FREE((caddr_t)nfhp, M_NFSFH);
165		return (error);
166	}
167	if (nvp != NULL) {
168		np = VTONFS(nvp);
169		/*
170		 * For NFSv4, check to see if it is the same name and
171		 * replace the name, if it is different.
172		 */
173		oldd = newd = NULL;
174		if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL &&
175		    nvp->v_type == VREG &&
176		    (np->n_v4->n4_namelen != cnp->cn_namelen ||
177		     NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
178		     cnp->cn_namelen) ||
179		     dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
180		     NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
181		     dnp->n_fhp->nfh_len))) {
182		    MALLOC(newd, struct nfsv4node *,
183			sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len +
184			cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK);
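		    /*
		     * The replacement nfsv4node was allocated without the
		     * node lock held, so re-check the name/directory fh
		     * mismatch with the lock held before replacing np->n_v4.
		     */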
185		    NFSLOCKNODE(np);
186		    if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG
187			&& (np->n_v4->n4_namelen != cnp->cn_namelen ||
188			 NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
189			 cnp->cn_namelen) ||
190			 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
191			 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
192			 dnp->n_fhp->nfh_len))) {
193			oldd = np->n_v4;
194			np->n_v4 = newd;
195			newd = NULL;
196			np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
197			np->n_v4->n4_namelen = cnp->cn_namelen;
198			NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
199			    dnp->n_fhp->nfh_len);
200			NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
201			    cnp->cn_namelen);
202		    }
203		    NFSUNLOCKNODE(np);
204		}
205		if (newd != NULL)
206			FREE((caddr_t)newd, M_NFSV4NODE);
207		if (oldd != NULL)
208			FREE((caddr_t)oldd, M_NFSV4NODE);
209		*npp = np;
210		FREE((caddr_t)nfhp, M_NFSFH);
211		return (0);
212	}
213	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);
214
215	error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
216	if (error) {
217		uma_zfree(newnfsnode_zone, np);
218		FREE((caddr_t)nfhp, M_NFSFH);
219		return (error);
220	}
221	vp = nvp;
222	KASSERT(vp->v_bufobj.bo_bsize != 0, ("nfscl_nget: bo_bsize == 0"));
223	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
224	vp->v_data = np;
225	np->n_vnode = vp;
226	/*
227	 * Initialize the mutex even if the vnode is going to be a loser.
228	 * This simplifies the logic in reclaim, which can then unconditionally
229	 * destroy the mutex (in the case of the loser, or if hash_insert
230	 * happened to return an error no special casing is needed).
231	 */
232	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
233	lockinit(&np->n_excl, PVFS, "nfsupg", VLKTIMEOUT, LK_NOSHARE |
234	    LK_CANRECURSE);
235
236	/*
237	 * Are we getting the root? If so, make sure the vnode flags
238	 * are correct
239	 */
240	if ((nfhp->nfh_len == nmp->nm_fhsize) &&
241	    !bcmp(nfhp->nfh_fh, nmp->nm_fh, nfhp->nfh_len)) {
242		if (vp->v_type == VNON)
243			vp->v_type = VDIR;
244		vp->v_vflag |= VV_ROOT;
245	}
246
247	np->n_fhp = nfhp;
248	/*
249	 * For NFSv4, we have to attach the directory file handle and
250	 * file name, so that Open Ops can be done later.
251	 */
252	if (nmp->nm_flag & NFSMNT_NFSV4) {
253		MALLOC(np->n_v4, struct nfsv4node *, sizeof (struct nfsv4node)
254		    + dnp->n_fhp->nfh_len + cnp->cn_namelen - 1, M_NFSV4NODE,
255		    M_WAITOK);
256		np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
257		np->n_v4->n4_namelen = cnp->cn_namelen;
258		NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
259		    dnp->n_fhp->nfh_len);
260		NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
261		    cnp->cn_namelen);
262	} else {
263		np->n_v4 = NULL;
264	}
265
266	/*
267	 * NFS supports recursive and shared locking.
268	 */
269	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
270	VN_LOCK_AREC(vp);
271	VN_LOCK_ASHARE(vp);
272	error = insmntque(vp, mntp);
273	if (error != 0) {
274		*npp = NULL;
275		mtx_destroy(&np->n_mtx);
276		lockdestroy(&np->n_excl);
277		FREE((caddr_t)nfhp, M_NFSFH);
278		if (np->n_v4 != NULL)
279			FREE((caddr_t)np->n_v4, M_NFSV4NODE);
280		uma_zfree(newnfsnode_zone, np);
281		return (error);
282	}
283	error = vfs_hash_insert(vp, hash, lkflags,
284	    td, &nvp, newnfs_vncmpf, nfhp);
285	if (error)
286		return (error);
287	if (nvp != NULL) {
288		*npp = VTONFS(nvp);
289		/* vfs_hash_insert() vput()'s the losing vnode */
290		return (0);
291	}
292	*npp = np;
293
294	return (0);
295}
296
297/*
298 * Another variant of nfscl_nget(). This one is only used by reopen. It
299 * takes almost the same args as nfscl_nget(), but only succeeds if an entry
300 * exists in the cache. (Since files should already be "open" with a
301 * vnode ref cnt on the node when reopen calls this, it should always
302 * succeed.)
303 * Also, don't get a vnode lock, since it may already be locked by some
304 * other process that is handling it. This is ok, since all other threads
305 * on the client are blocked by the nfsc_lock being exclusively held by the
306 * caller of this function.
307 */
308int
309nfscl_ngetreopen(struct mount *mntp, u_int8_t *fhp, int fhsize,
310    struct thread *td, struct nfsnode **npp)
311{
312	struct vnode *nvp;
313	u_int hash;
314	struct nfsfh *nfhp;
315	int error;
316
317	*npp = NULL;
318	/* For forced dismounts, just return error. */
319	if (NFSCL_FORCEDISM(mntp))
320		return (EINTR);
321	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
322	    M_NFSFH, M_WAITOK);
323	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
324	nfhp->nfh_len = fhsize;
325
326	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);
327
328	/*
329	 * First, try to get the vnode locked, but don't block for the lock.
330	 */
331	error = vfs_hash_get(mntp, hash, (LK_EXCLUSIVE | LK_NOWAIT), td, &nvp,
332	    newnfs_vncmpf, nfhp);
333	if (error == 0 && nvp != NULL) {
334		NFSVOPUNLOCK(nvp, 0);
335	} else if (error == EBUSY) {
336		/*
337		 * It is safe so long as a vflush() with
338		 * FORCECLOSE has not been done. Since the Renew thread is
339		 * stopped and the MNTK_UNMOUNTF flag is set before doing
340		 * a vflush() with FORCECLOSE, we should be ok here.
341		 */
342		if (NFSCL_FORCEDISM(mntp))
343			error = EINTR;
344		else {
345			vfs_hash_ref(mntp, hash, td, &nvp, newnfs_vncmpf, nfhp);
346			if (nvp == NULL) {
347				error = ENOENT;
348			} else if ((nvp->v_iflag & VI_DOOMED) != 0) {
349				error = ENOENT;
350				vrele(nvp);
351			} else {
352				error = 0;
353			}
354		}
355	}
356	FREE(nfhp, M_NFSFH);
357	if (error)
358		return (error);
359	if (nvp != NULL) {
360		*npp = VTONFS(nvp);
361		return (0);
362	}
363	return (EINVAL);
364}
365
366static void
367nfscl_warn_fileid(struct nfsmount *nmp, struct nfsvattr *oldnap,
368    struct nfsvattr *newnap)
369{
370	int off;
371
372	if (ncl_fileid_maxwarnings >= 0 &&
373	    ncl_fileid_nwarnings >= ncl_fileid_maxwarnings)
374		return;
375	off = 0;
376	if (ncl_fileid_maxwarnings >= 0) {
377		if (++ncl_fileid_nwarnings >= ncl_fileid_maxwarnings)
378			off = 1;
379	}
380
381	printf("newnfs: server '%s' error: fileid changed. "
382	    "fsid %jx:%jx: expected fileid %#jx, got %#jx. "
383	    "(BROKEN NFS SERVER OR MIDDLEWARE)\n",
384	    nmp->nm_com.nmcom_hostname,
385	    (uintmax_t)nmp->nm_fsid[0],
386	    (uintmax_t)nmp->nm_fsid[1],
387	    (uintmax_t)oldnap->na_fileid,
388	    (uintmax_t)newnap->na_fileid);
389
390	if (off)
391		printf("newnfs: Logged %d times about fileid corruption; "
392		    "going quiet to avoid spamming logs excessively. (Limit "
393		    "is: %d).\n", ncl_fileid_nwarnings,
394		    ncl_fileid_maxwarnings);
395}
396
397/*
398 * Load the attribute cache (that lives in the nfsnode entry) with
399 * the attributes of the second argument and,
400 * iff vaper is not NULL,
401 * copy the attributes to *vaper as well.
402 * Similar to nfs_loadattrcache(), except the attributes are passed in
403 * instead of being parsed out of the mbuf list.
404 */
405int
406nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
407    void *stuff, int writeattr, int dontshrink)
408{
409	struct vnode *vp = *vpp;
410	struct vattr *vap, *nvap = &nap->na_vattr, *vaper = nvaper;
411	struct nfsnode *np;
412	struct nfsmount *nmp;
413	struct timespec mtime_save;
414	u_quad_t nsize;
415	int setnsize, error, force_fid_err;
416
417	error = 0;
418	setnsize = 0;
419	nsize = 0;
420
421	/*
422	 * If v_type == VNON it is a new node, so fill in the v_type,
423	 * n_mtime fields. Check to see if it represents a special
424	 * device, and if so, check for a possible alias. Once the
425	 * correct vnode has been obtained, fill in the rest of the
426	 * information.
427	 */
428	np = VTONFS(vp);
429	NFSLOCKNODE(np);
430	if (vp->v_type != nvap->va_type) {
431		vp->v_type = nvap->va_type;
432		if (vp->v_type == VFIFO)
433			vp->v_op = &newnfs_fifoops;
434		np->n_mtime = nvap->va_mtime;
435	}
436	nmp = VFSTONFS(vp->v_mount);
437	vap = &np->n_vattr.na_vattr;
438	mtime_save = vap->va_mtime;
439	if (writeattr) {
440		np->n_vattr.na_filerev = nap->na_filerev;
441		np->n_vattr.na_size = nap->na_size;
442		np->n_vattr.na_mtime = nap->na_mtime;
443		np->n_vattr.na_ctime = nap->na_ctime;
444		np->n_vattr.na_fsid = nap->na_fsid;
445		np->n_vattr.na_mode = nap->na_mode;
446	} else {
447		force_fid_err = 0;
448		KFAIL_POINT_ERROR(DEBUG_FP, nfscl_force_fileid_warning,
449		    force_fid_err);
450		/*
451		 * BROKEN NFS SERVER OR MIDDLEWARE
452		 *
453		 * Certain NFS servers (certain old proprietary filers ca.
454		 * 2006) or broken middleboxes (e.g. WAN accelerator products)
455		 * will respond to GETATTR requests with results for a
456		 * different fileid.
457		 *
458		 * The WAN accelerator we've observed not only serves stale
459		 * cache results for a given file, it also occasionally serves
460		 * results for wholly different files.  This causes surprising
461		 * problems; for example the cached size attribute of a file
462		 * may truncate down and then back up, resulting in zero
463		 * regions in file contents read by applications.  We observed
464		 * this reliably with Clang and .c files during parallel build.
465		 * A pcap revealed packet fragmentation and GETATTR RPC
466		 * responses with wholly wrong fileids.
467		 */
468		if ((np->n_vattr.na_fileid != 0 &&
469		     np->n_vattr.na_fileid != nap->na_fileid) ||
470		    force_fid_err) {
471			nfscl_warn_fileid(nmp, &np->n_vattr, nap);
472			error = EIDRM;
473			goto out;
474		}
475		NFSBCOPY((caddr_t)nap, (caddr_t)&np->n_vattr,
476		    sizeof (struct nfsvattr));
477	}
478
479	/*
480	 * For NFSv4, if the node's fsid is not equal to the mount point's
481	 * fsid, return the low order 32bits of the node's fsid. This
482	 * allows getcwd(3) to work. There is a chance that the fsid might
483	 * be the same as a local fs, but since this is in an NFS mount
484	 * point, I don't think that will cause any problems?
485	 */
486	if (NFSHASNFSV4(nmp) && NFSHASHASSETFSID(nmp) &&
487	    (nmp->nm_fsid[0] != np->n_vattr.na_filesid[0] ||
488	     nmp->nm_fsid[1] != np->n_vattr.na_filesid[1])) {
489		/*
490		 * va_fsid needs to be set to some value derived from
491		 * np->n_vattr.na_filesid that is not equal to
492		 * vp->v_mount->mnt_stat.f_fsid.val[0], so that it changes
493		 * from the value used for the top level server volume
494		 * in the mounted subtree.
495		 */
496		if (vp->v_mount->mnt_stat.f_fsid.val[0] !=
497		    (uint32_t)np->n_vattr.na_filesid[0])
498			vap->va_fsid = (uint32_t)np->n_vattr.na_filesid[0];
499		else
500			vap->va_fsid = (uint32_t)hash32_buf(
501			    np->n_vattr.na_filesid, 2 * sizeof(uint64_t), 0);
502	} else
503		vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
504	np->n_attrstamp = time_second;
505	if (vap->va_size != np->n_size) {
506		if (vap->va_type == VREG) {
507			if (dontshrink && vap->va_size < np->n_size) {
508				/*
509				 * We've been told not to shrink the file;
510				 * zero np->n_attrstamp to indicate that
511				 * the attributes are stale.
512				 */
513				vap->va_size = np->n_size;
514				np->n_attrstamp = 0;
515				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
516				vnode_pager_setsize(vp, np->n_size);
517			} else if (np->n_flag & NMODIFIED) {
518				/*
519				 * We've modified the file: Use the larger
520				 * of our size, and the server's size.
521				 */
522				if (vap->va_size < np->n_size) {
523					vap->va_size = np->n_size;
524				} else {
525					np->n_size = vap->va_size;
526					np->n_flag |= NSIZECHANGED;
527				}
528				vnode_pager_setsize(vp, np->n_size);
529			} else if (vap->va_size < np->n_size) {
530				/*
531				 * When shrinking the size, the call to
532				 * vnode_pager_setsize() cannot be done
533				 * with the mutex held, so delay it until
534				 * after the mtx_unlock call.
535				 */
536				nsize = np->n_size = vap->va_size;
537				np->n_flag |= NSIZECHANGED;
538				setnsize = 1;
539			} else {
540				np->n_size = vap->va_size;
541				np->n_flag |= NSIZECHANGED;
542				vnode_pager_setsize(vp, np->n_size);
543			}
544		} else {
545			np->n_size = vap->va_size;
546		}
547	}
548	/*
549	 * The following checks are added to prevent a race between (say)
550	 * a READDIR+ and a WRITE.
551	 * READDIR+, WRITE requests sent out.
552	 * READDIR+ resp, WRITE resp received on client.
553	 * However, the WRITE resp was handled before the READDIR+ resp
554	 * causing the post op attrs from the write to be loaded first
555	 * and the attrs from the READDIR+ to be loaded later. If this
556	 * happens, we have stale attrs loaded into the attrcache.
557	 * We detect this by checking for the mtime moving backwards, and we
558	 * invalidate the attrcache when this happens.
559	 */
560	if (timespeccmp(&mtime_save, &vap->va_mtime, >)) {
561		/* The mtime went backwards; the cached attributes are stale. */
562		np->n_attrstamp = 0;
563		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
564	}
565	if (vaper != NULL) {
566		NFSBCOPY((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
567		if (np->n_flag & NCHG) {
568			if (np->n_flag & NACC)
569				vaper->va_atime = np->n_atim;
570			if (np->n_flag & NUPD)
571				vaper->va_mtime = np->n_mtim;
572		}
573	}
574
575out:
576#ifdef KDTRACE_HOOKS
577	if (np->n_attrstamp != 0)
578		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, vap, error);
579#endif
580	NFSUNLOCKNODE(np);
581	if (setnsize)
582		vnode_pager_setsize(vp, nsize);
583	return (error);
584}
585
586/*
587 * Fill in the client id name. For these bytes:
588 * 1 - they must be unique
589 * 2 - they should be persistent across client reboots
590 * 1 is more critical than 2
591 * Use the mount point's unique id plus either the uuid or, if that
592 * isn't set, random junk.
593 */
594void
595nfscl_fillclid(u_int64_t clval, char *uuid, u_int8_t *cp, u_int16_t idlen)
596{
597	int uuidlen;
598
599	/*
600	 * First, put in the 64bit mount point identifier.
601	 */
602	if (idlen >= sizeof (u_int64_t)) {
603		NFSBCOPY((caddr_t)&clval, cp, sizeof (u_int64_t));
604		cp += sizeof (u_int64_t);
605		idlen -= sizeof (u_int64_t);
606	}
607
608	/*
609	 * If uuid is non-zero length, use it.
610	 */
611	uuidlen = strlen(uuid);
612	if (uuidlen > 0 && idlen >= uuidlen) {
613		NFSBCOPY(uuid, cp, uuidlen);
614		cp += uuidlen;
615		idlen -= uuidlen;
616	}
617
618	/*
619	 * This only normally happens if the uuid isn't set.
620	 */
621	while (idlen > 0) {
622		*cp++ = (u_int8_t)(arc4random() % 256);
623		idlen--;
624	}
625}
626
627/*
628 * Fill in a lock owner name. For now, pid + the process's creation time.
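 * For F_POSIX locks the name is the pid followed by the process start
 * time's tv_sec and tv_usec, each stored as 4 bytes in host byte order.
 * For F_FLOCK locks the opaque id pointer value is used, zero padded out
 * to NFSV4CL_LOCKNAMELEN bytes.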
629 */
630void
631nfscl_filllockowner(void *id, u_int8_t *cp, int flags)
632{
633	union {
634		u_int32_t	lval;
635		u_int8_t	cval[4];
636	} tl;
637	struct proc *p;
638
639	if (id == NULL) {
640		/* Return the single open_owner of all 0 bytes. */
641		bzero(cp, NFSV4CL_LOCKNAMELEN);
642		return;
643	}
644	if ((flags & F_POSIX) != 0) {
645		p = (struct proc *)id;
646		tl.lval = p->p_pid;
647		*cp++ = tl.cval[0];
648		*cp++ = tl.cval[1];
649		*cp++ = tl.cval[2];
650		*cp++ = tl.cval[3];
651		tl.lval = p->p_stats->p_start.tv_sec;
652		*cp++ = tl.cval[0];
653		*cp++ = tl.cval[1];
654		*cp++ = tl.cval[2];
655		*cp++ = tl.cval[3];
656		tl.lval = p->p_stats->p_start.tv_usec;
657		*cp++ = tl.cval[0];
658		*cp++ = tl.cval[1];
659		*cp++ = tl.cval[2];
660		*cp = tl.cval[3];
661	} else if ((flags & F_FLOCK) != 0) {
662		bcopy(&id, cp, sizeof(id));
663		bzero(&cp[sizeof(id)], NFSV4CL_LOCKNAMELEN - sizeof(id));
664	} else {
665		printf("nfscl_filllockowner: not F_POSIX or F_FLOCK\n");
666		bzero(cp, NFSV4CL_LOCKNAMELEN);
667	}
668}
669
670/*
671 * Find the parent process for the thread passed in as an argument.
672 * If none exists, return NULL, otherwise return a thread for the parent.
673 * (Can be any of the threads, since it is only used for td->td_proc.)
674 */
675NFSPROC_T *
676nfscl_getparent(struct thread *td)
677{
678	struct proc *p;
679	struct thread *ptd;
680
681	if (td == NULL)
682		return (NULL);
683	p = td->td_proc;
684	if (p->p_pid == 0)
685		return (NULL);
686	p = p->p_pptr;
687	if (p == NULL)
688		return (NULL);
689	ptd = TAILQ_FIRST(&p->p_threads);
690	return (ptd);
691}
692
693/*
694 * Start up the renew kernel thread.
695 */
696static void
697start_nfscl(void *arg)
698{
699	struct nfsclclient *clp;
700	struct thread *td;
701
702	clp = (struct nfsclclient *)arg;
703	td = TAILQ_FIRST(&clp->nfsc_renewthread->p_threads);
704	nfscl_renewthread(clp, td);
705	kproc_exit(0);
706}
707
708void
709nfscl_start_renewthread(struct nfsclclient *clp)
710{
711
712	kproc_create(start_nfscl, (void *)clp, &clp->nfsc_renewthread, 0, 0,
713	    "nfscl");
714}
715
716/*
717 * Handle wcc_data.
718 * For NFSv4, it assumes that nfsv4_wccattr() was used to set up the getattr
719 * as the first Op after PutFH.
720 * (For NFSv4, the postop attributes are after the Op, so they can't be
721 *  parsed here. A separate call to nfscl_postop_attr() is required.)
722 */
723int
724nfscl_wcc_data(struct nfsrv_descript *nd, struct vnode *vp,
725    struct nfsvattr *nap, int *flagp, int *wccflagp, void *stuff)
726{
727	u_int32_t *tl;
728	struct nfsnode *np = VTONFS(vp);
729	struct nfsvattr nfsva;
730	int error = 0;
731
732	if (wccflagp != NULL)
733		*wccflagp = 0;
734	if (nd->nd_flag & ND_NFSV3) {
735		*flagp = 0;
736		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
737		if (*tl == newnfs_true) {
738			NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
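			/*
			 * The pre-op attributes are size, mtime and ctime
			 * (6 XDR words); tl + 2 and tl + 3 hold the mtime's
			 * seconds and nseconds fields.
			 */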
739			if (wccflagp != NULL) {
740				mtx_lock(&np->n_mtx);
741				*wccflagp = (np->n_mtime.tv_sec ==
742				    fxdr_unsigned(u_int32_t, *(tl + 2)) &&
743				    np->n_mtime.tv_nsec ==
744				    fxdr_unsigned(u_int32_t, *(tl + 3)));
745				mtx_unlock(&np->n_mtx);
746			}
747		}
748		error = nfscl_postop_attr(nd, nap, flagp, stuff);
749		if (wccflagp != NULL && *flagp == 0)
750			*wccflagp = 0;
751	} else if ((nd->nd_flag & (ND_NOMOREDATA | ND_NFSV4 | ND_V4WCCATTR))
752	    == (ND_NFSV4 | ND_V4WCCATTR)) {
753		error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
754		    NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
755		    NULL, NULL, NULL, NULL, NULL);
756		if (error)
757			return (error);
758		/*
759		 * Get rid of Op# and status for next op.
760		 */
761		NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
762		if (*++tl)
763			nd->nd_flag |= ND_NOMOREDATA;
764		if (wccflagp != NULL &&
765		    nfsva.na_vattr.va_mtime.tv_sec != 0) {
766			mtx_lock(&np->n_mtx);
767			*wccflagp = (np->n_mtime.tv_sec ==
768			    nfsva.na_vattr.va_mtime.tv_sec &&
769			    np->n_mtime.tv_nsec ==
770			    nfsva.na_vattr.va_mtime.tv_nsec);
771			mtx_unlock(&np->n_mtx);
772		}
773	}
774nfsmout:
775	return (error);
776}
777
778/*
779 * Get postop attributes.
780 */
781int
782nfscl_postop_attr(struct nfsrv_descript *nd, struct nfsvattr *nap, int *retp,
783    void *stuff)
784{
785	u_int32_t *tl;
786	int error = 0;
787
788	*retp = 0;
789	if (nd->nd_flag & ND_NOMOREDATA)
790		return (error);
791	if (nd->nd_flag & ND_NFSV3) {
792		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
793		*retp = fxdr_unsigned(int, *tl);
794	} else if (nd->nd_flag & ND_NFSV4) {
795		/*
796		 * For NFSv4, the postop attr are at the end, so no point
797		 * in looking if nd_repstat != 0.
798		 */
799		if (!nd->nd_repstat) {
800			NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
801			if (*(tl + 1))
802				/* should never happen since nd_repstat != 0 */
803				/* Should never happen, since nd_repstat would then be non-zero. */
804			else
805				*retp = 1;
806		}
807	} else if (!nd->nd_repstat) {
808		/* For NFSv2, the attributes are here iff nd_repstat == 0 */
809		*retp = 1;
810	}
811	if (*retp) {
812		error = nfsm_loadattr(nd, nap);
813		if (error)
814			*retp = 0;
815	}
816nfsmout:
817	return (error);
818}
819
820/*
821 * Fill in the settable attributes.  The NFSSATTR_FULL flag in the flags
822 * argument indicates whether to fill them all in or just mode and time.
823 */
824void
825nfscl_fillsattr(struct nfsrv_descript *nd, struct vattr *vap,
826    struct vnode *vp, int flags, u_int32_t rdev)
827{
828	u_int32_t *tl;
829	struct nfsv2_sattr *sp;
830	nfsattrbit_t attrbits;
831
832	switch (nd->nd_flag & (ND_NFSV2 | ND_NFSV3 | ND_NFSV4)) {
833	case ND_NFSV2:
834		NFSM_BUILD(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
835		if (vap->va_mode == (mode_t)VNOVAL)
836			sp->sa_mode = newnfs_xdrneg1;
837		else
838			sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
839		if (vap->va_uid == (uid_t)VNOVAL)
840			sp->sa_uid = newnfs_xdrneg1;
841		else
842			sp->sa_uid = txdr_unsigned(vap->va_uid);
843		if (vap->va_gid == (gid_t)VNOVAL)
844			sp->sa_gid = newnfs_xdrneg1;
845		else
846			sp->sa_gid = txdr_unsigned(vap->va_gid);
847		if (flags & NFSSATTR_SIZE0)
848			sp->sa_size = 0;
849		else if (flags & NFSSATTR_SIZENEG1)
850			sp->sa_size = newnfs_xdrneg1;
851		else if (flags & NFSSATTR_SIZERDEV)
852			sp->sa_size = txdr_unsigned(rdev);
853		else
854			sp->sa_size = txdr_unsigned(vap->va_size);
855		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
856		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
857		break;
858	case ND_NFSV3:
859		if (vap->va_mode != (mode_t)VNOVAL) {
860			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
861			*tl++ = newnfs_true;
862			*tl = txdr_unsigned(vap->va_mode);
863		} else {
864			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
865			*tl = newnfs_false;
866		}
867		if ((flags & NFSSATTR_FULL) && vap->va_uid != (uid_t)VNOVAL) {
868			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
869			*tl++ = newnfs_true;
870			*tl = txdr_unsigned(vap->va_uid);
871		} else {
872			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
873			*tl = newnfs_false;
874		}
875		if ((flags & NFSSATTR_FULL) && vap->va_gid != (gid_t)VNOVAL) {
876			NFSM_BUILD(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
877			*tl++ = newnfs_true;
878			*tl = txdr_unsigned(vap->va_gid);
879		} else {
880			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
881			*tl = newnfs_false;
882		}
883		if ((flags & NFSSATTR_FULL) && vap->va_size != VNOVAL) {
884			NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
885			*tl++ = newnfs_true;
886			txdr_hyper(vap->va_size, tl);
887		} else {
888			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
889			*tl = newnfs_false;
890		}
891		if (vap->va_atime.tv_sec != VNOVAL) {
892			if ((vap->va_vaflags & VA_UTIMES_NULL) == 0) {
893				NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
894				*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
895				txdr_nfsv3time(&vap->va_atime, tl);
896			} else {
897				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
898				*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
899			}
900		} else {
901			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
902			*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
903		}
904		if (vap->va_mtime.tv_sec != VNOVAL) {
905			if ((vap->va_vaflags & VA_UTIMES_NULL) == 0) {
906				NFSM_BUILD(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
907				*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
908				txdr_nfsv3time(&vap->va_mtime, tl);
909			} else {
910				NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
911				*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
912			}
913		} else {
914			NFSM_BUILD(tl, u_int32_t *, NFSX_UNSIGNED);
915			*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
916		}
917		break;
918	case ND_NFSV4:
919		NFSZERO_ATTRBIT(&attrbits);
920		if (vap->va_mode != (mode_t)VNOVAL)
921			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_MODE);
922		if ((flags & NFSSATTR_FULL) && vap->va_uid != (uid_t)VNOVAL)
923			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNER);
924		if ((flags & NFSSATTR_FULL) && vap->va_gid != (gid_t)VNOVAL)
925			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_OWNERGROUP);
926		if ((flags & NFSSATTR_FULL) && vap->va_size != VNOVAL)
927			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_SIZE);
928		if (vap->va_atime.tv_sec != VNOVAL)
929			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEACCESSSET);
930		if (vap->va_mtime.tv_sec != VNOVAL)
931			NFSSETBIT_ATTRBIT(&attrbits, NFSATTRBIT_TIMEMODIFYSET);
932		(void) nfsv4_fillattr(nd, vp->v_mount, vp, NULL, vap, NULL, 0,
933		    &attrbits, NULL, NULL, 0, 0, 0, 0, (uint64_t)0);
934		break;
935	}
936}
937
938/*
939 * nfscl_request() - mostly a wrapper for newnfs_request().
940 */
941int
942nfscl_request(struct nfsrv_descript *nd, struct vnode *vp, NFSPROC_T *p,
943    struct ucred *cred, void *stuff)
944{
945	int ret, vers;
946	struct nfsmount *nmp;
947
948	nmp = VFSTONFS(vp->v_mount);
949	if (nd->nd_flag & ND_NFSV4)
950		vers = NFS_VER4;
951	else if (nd->nd_flag & ND_NFSV3)
952		vers = NFS_VER3;
953	else
954		vers = NFS_VER2;
955	ret = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred,
956		NFS_PROG, vers, NULL, 1, NULL, NULL);
957	return (ret);
958}
959
960/*
961 * Fill in this BSD's variant of statfs using nfsstatfs.
962 */
963void
964nfscl_loadsbinfo(struct nfsmount *nmp, struct nfsstatfs *sfp, void *statfs)
965{
966	struct statfs *sbp = (struct statfs *)statfs;
967
968	if (nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) {
969		sbp->f_bsize = NFS_FABLKSIZE;
970		sbp->f_blocks = sfp->sf_tbytes / NFS_FABLKSIZE;
971		sbp->f_bfree = sfp->sf_fbytes / NFS_FABLKSIZE;
972		/*
973		 * Although sf_abytes is uint64_t and f_bavail is int64_t,
974		 * the value after dividing by NFS_FABLKSIZE is small
975		 * enough that it will fit in 63bits, so it is ok to
976		 * assign it to f_bavail without fear that it will become
977		 * negative.
978		 */
979		sbp->f_bavail = sfp->sf_abytes / NFS_FABLKSIZE;
980		sbp->f_files = sfp->sf_tfiles;
981		/* Since f_ffree is int64_t, clip it to 63bits. */
982		if (sfp->sf_ffiles > INT64_MAX)
983			sbp->f_ffree = INT64_MAX;
984		else
985			sbp->f_ffree = sfp->sf_ffiles;
986	} else if ((nmp->nm_flag & NFSMNT_NFSV4) == 0) {
987		/*
988		 * The type casts to (int32_t) ensure that this code is
989		 * compatible with the old NFS client, in that it will
990		 * propagate bit31 to the high order bits. This may or may
991		 * not be correct for NFSv2, but since it is a legacy
992		 * environment, I'd rather retain backwards compatibility.
993		 */
994		sbp->f_bsize = (int32_t)sfp->sf_bsize;
995		sbp->f_blocks = (int32_t)sfp->sf_blocks;
996		sbp->f_bfree = (int32_t)sfp->sf_bfree;
997		sbp->f_bavail = (int32_t)sfp->sf_bavail;
998		sbp->f_files = 0;
999		sbp->f_ffree = 0;
1000	}
1001}
1002
1003/*
1004 * Use the fsinfo stuff to update the mount point.
1005 */
1006void
1007nfscl_loadfsinfo(struct nfsmount *nmp, struct nfsfsinfo *fsp)
1008{
1009
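	/*
	 * Clamp nm_wsize, nm_rsize and nm_readdirsize to the server's
	 * preferred and maximum transfer sizes, rounded to multiples of
	 * NFS_FABLKSIZE (NFS_DIRBLKSIZ for readdir).
	 */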
1010	if ((nmp->nm_wsize == 0 || fsp->fs_wtpref < nmp->nm_wsize) &&
1011	    fsp->fs_wtpref >= NFS_FABLKSIZE)
1012		nmp->nm_wsize = (fsp->fs_wtpref + NFS_FABLKSIZE - 1) &
1013		    ~(NFS_FABLKSIZE - 1);
1014	if (fsp->fs_wtmax < nmp->nm_wsize && fsp->fs_wtmax > 0) {
1015		nmp->nm_wsize = fsp->fs_wtmax & ~(NFS_FABLKSIZE - 1);
1016		if (nmp->nm_wsize == 0)
1017			nmp->nm_wsize = fsp->fs_wtmax;
1018	}
1019	if (nmp->nm_wsize < NFS_FABLKSIZE)
1020		nmp->nm_wsize = NFS_FABLKSIZE;
1021	if ((nmp->nm_rsize == 0 || fsp->fs_rtpref < nmp->nm_rsize) &&
1022	    fsp->fs_rtpref >= NFS_FABLKSIZE)
1023		nmp->nm_rsize = (fsp->fs_rtpref + NFS_FABLKSIZE - 1) &
1024		    ~(NFS_FABLKSIZE - 1);
1025	if (fsp->fs_rtmax < nmp->nm_rsize && fsp->fs_rtmax > 0) {
1026		nmp->nm_rsize = fsp->fs_rtmax & ~(NFS_FABLKSIZE - 1);
1027		if (nmp->nm_rsize == 0)
1028			nmp->nm_rsize = fsp->fs_rtmax;
1029	}
1030	if (nmp->nm_rsize < NFS_FABLKSIZE)
1031		nmp->nm_rsize = NFS_FABLKSIZE;
1032	if ((nmp->nm_readdirsize == 0 || fsp->fs_dtpref < nmp->nm_readdirsize)
1033	    && fsp->fs_dtpref >= NFS_DIRBLKSIZ)
1034		nmp->nm_readdirsize = (fsp->fs_dtpref + NFS_DIRBLKSIZ - 1) &
1035		    ~(NFS_DIRBLKSIZ - 1);
1036	if (fsp->fs_rtmax < nmp->nm_readdirsize && fsp->fs_rtmax > 0) {
1037		nmp->nm_readdirsize = fsp->fs_rtmax & ~(NFS_DIRBLKSIZ - 1);
1038		if (nmp->nm_readdirsize == 0)
1039			nmp->nm_readdirsize = fsp->fs_rtmax;
1040	}
1041	if (nmp->nm_readdirsize < NFS_DIRBLKSIZ)
1042		nmp->nm_readdirsize = NFS_DIRBLKSIZ;
1043	if (fsp->fs_maxfilesize > 0 &&
1044	    fsp->fs_maxfilesize < nmp->nm_maxfilesize)
1045		nmp->nm_maxfilesize = fsp->fs_maxfilesize;
1046	nmp->nm_mountp->mnt_stat.f_iosize = newnfs_iosize(nmp);
1047	nmp->nm_state |= NFSSTA_GOTFSINFO;
1048}
1049
1050/*
1051 * Looks up the source address which should be used to communicate with
1052 * @nmp and stores it inside @paddr.
1053 *
1054 * Returns a pointer to @paddr on success, or NULL on failure.
1055 */
1056u_int8_t *
1057nfscl_getmyip(struct nfsmount *nmp, struct in6_addr *paddr, int *isinet6p)
1058{
1059#if defined(INET6) || defined(INET)
1060	int error, fibnum;
1061
1062	fibnum = curthread->td_proc->p_fibnum;
1063#endif
1064#ifdef INET
1065	if (nmp->nm_nam->sa_family == AF_INET) {
1066		struct sockaddr_in *sin;
1067		struct nhop4_extended nh_ext;
1068
1069		sin = (struct sockaddr_in *)nmp->nm_nam;
1070		CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
1071		error = fib4_lookup_nh_ext(fibnum, sin->sin_addr, 0, 0,
1072		    &nh_ext);
1073		CURVNET_RESTORE();
1074		if (error != 0)
1075			return (NULL);
1076
1077		if ((ntohl(nh_ext.nh_src.s_addr) >> IN_CLASSA_NSHIFT) ==
1078		    IN_LOOPBACKNET) {
1079			/* Ignore loopback addresses */
1080			return (NULL);
1081		}
1082
1083		*isinet6p = 0;
1084		*((struct in_addr *)paddr) = nh_ext.nh_src;
1085
1086		return (u_int8_t *)paddr;
1087	}
1088#endif
1089#ifdef INET6
1090	if (nmp->nm_nam->sa_family == AF_INET6) {
1091		struct sockaddr_in6 *sin6;
1092
1093		sin6 = (struct sockaddr_in6 *)nmp->nm_nam;
1094
1095		CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
1096		error = in6_selectsrc_addr(fibnum, &sin6->sin6_addr,
1097		    sin6->sin6_scope_id, NULL, paddr, NULL);
1098		CURVNET_RESTORE();
1099		if (error != 0)
1100			return (NULL);
1101
1102		if (IN6_IS_ADDR_LOOPBACK(paddr))
1103			return (NULL);
1104
1105		/* Scope is embedded in */
1106		/* The scope id is embedded in the address. */
1107
1108		return (u_int8_t *)paddr;
1109	}
1110#endif
1111	return (NULL);
1112}
1113
1114/*
1115 * Copy NFS uid, gids from the cred structure.
1116 */
1117void
1118newnfs_copyincred(struct ucred *cr, struct nfscred *nfscr)
1119{
1120	int i;
1121
1122	KASSERT(cr->cr_ngroups >= 0,
1123	    ("newnfs_copyincred: negative cr_ngroups"));
1124	nfscr->nfsc_uid = cr->cr_uid;
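	/* cr_groups[0] is the effective gid, hence the NFS_MAXGRPS + 1 limit. */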
1125	nfscr->nfsc_ngroups = MIN(cr->cr_ngroups, NFS_MAXGRPS + 1);
1126	for (i = 0; i < nfscr->nfsc_ngroups; i++)
1127		nfscr->nfsc_groups[i] = cr->cr_groups[i];
1128}
1129
1130
1131/*
1132 * Do any client specific initialization.
1133 */
1134void
1135nfscl_init(void)
1136{
1137	static int inited = 0;
1138
1139	if (inited)
1140		return;
1141	inited = 1;
1142	nfscl_inited = 1;
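	/* Size the client's pbuf limit at roughly half of the system pbufs. */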
1143	ncl_pbuf_freecnt = nswbuf / 2 + 1;
1144}
1145
1146/*
1147 * Check each of the attributes to be set, to ensure they aren't already
1148 * the correct value. Disable setting ones already correct.
1149 */
1150int
1151nfscl_checksattr(struct vattr *vap, struct nfsvattr *nvap)
1152{
1153
1154	if (vap->va_mode != (mode_t)VNOVAL) {
1155		if (vap->va_mode == nvap->na_mode)
1156			vap->va_mode = (mode_t)VNOVAL;
1157	}
1158	if (vap->va_uid != (uid_t)VNOVAL) {
1159		if (vap->va_uid == nvap->na_uid)
1160			vap->va_uid = (uid_t)VNOVAL;
1161	}
1162	if (vap->va_gid != (gid_t)VNOVAL) {
1163		if (vap->va_gid == nvap->na_gid)
1164			vap->va_gid = (gid_t)VNOVAL;
1165	}
1166	if (vap->va_size != VNOVAL) {
1167		if (vap->va_size == nvap->na_size)
1168			vap->va_size = VNOVAL;
1169	}
1170
1171	/*
1172	 * We are normally called with only a partially initialized
1173	 * VAP.  Since the NFSv3 spec says that a server may use the
1174	 * file attributes to store the verifier, the spec requires
1175	 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1176	 * in atime, but we can't really assume that all servers will,
1177	 * so we ensure that our SETATTR sets both atime and mtime.
1178	 * Set the VA_UTIMES_NULL flag for this case, so that
1179	 * the server's time will be used.  This is needed to
1180	 * work around a bug in some Solaris servers, where
1181	 * setting the time TOCLIENT causes the Setattr RPC
1182	 * to return NFS_OK, but not set va_mode.
1183	 */
1184	if (vap->va_mtime.tv_sec == VNOVAL) {
1185		vfs_timestamp(&vap->va_mtime);
1186		vap->va_vaflags |= VA_UTIMES_NULL;
1187	}
1188	if (vap->va_atime.tv_sec == VNOVAL)
1189		vap->va_atime = vap->va_mtime;
1190	return (1);
1191}
1192
1193/*
1194 * Map nfsv4 errors to errno.h errors.
1195 * The uid and gid arguments are only used for NFSERR_BADOWNER and that
1196 * error should only be returned for the Open, Create and Setattr Ops.
1197 * As such, most calls can just pass in 0 for those arguments.
1198 */
1199APPLESTATIC int
1200nfscl_maperr(struct thread *td, int error, uid_t uid, gid_t gid)
1201{
1202	struct proc *p;
1203
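	/*
	 * Plain errno values (< 10000) and errors at or above
	 * NFSERR_STALEWRITEVERF are returned unchanged; only the NFSv4
	 * protocol errors are mapped below.
	 */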
1204	if (error < 10000 || error >= NFSERR_STALEWRITEVERF)
1205		return (error);
1206	if (td != NULL)
1207		p = td->td_proc;
1208	else
1209		p = NULL;
1210	switch (error) {
1211	case NFSERR_BADOWNER:
1212		tprintf(p, LOG_INFO,
1213		    "No name and/or group mapping for uid,gid:(%d,%d)\n",
1214		    uid, gid);
1215		return (EPERM);
1216	case NFSERR_BADNAME:
1217	case NFSERR_BADCHAR:
1218		printf("nfsv4 char/name not handled by server\n");
1219		return (ENOENT);
1220	case NFSERR_STALECLIENTID:
1221	case NFSERR_STALESTATEID:
1222	case NFSERR_EXPIRED:
1223	case NFSERR_BADSTATEID:
1224	case NFSERR_BADSESSION:
1225		printf("nfsv4 recover err returned %d\n", error);
1226		return (EIO);
1227	case NFSERR_BADHANDLE:
1228	case NFSERR_SERVERFAULT:
1229	case NFSERR_BADTYPE:
1230	case NFSERR_FHEXPIRED:
1231	case NFSERR_RESOURCE:
1232	case NFSERR_MOVED:
1233	case NFSERR_NOFILEHANDLE:
1234	case NFSERR_MINORVERMISMATCH:
1235	case NFSERR_OLDSTATEID:
1236	case NFSERR_BADSEQID:
1237	case NFSERR_LEASEMOVED:
1238	case NFSERR_RECLAIMBAD:
1239	case NFSERR_BADXDR:
1240	case NFSERR_OPILLEGAL:
1241		printf("nfsv4 client/server protocol prob err=%d\n",
1242		    error);
1243		return (EIO);
1244	default:
1245		tprintf(p, LOG_INFO, "nfsv4 err=%d\n", error);
1246		return (EIO);
1247	}
1248}
1249
1250/*
1251 * Check to see if the process for this owner exists. Return 1 if it doesn't
1252 * and 0 otherwise.
1253 */
1254int
1255nfscl_procdoesntexist(u_int8_t *own)
1256{
1257	union {
1258		u_int32_t	lval;
1259		u_int8_t	cval[4];
1260	} tl;
1261	struct proc *p;
1262	pid_t pid;
1263	int i, ret = 0;
1264
1265	/* For the single open_owner of all 0 bytes, just return 0. */
1266	for (i = 0; i < NFSV4CL_LOCKNAMELEN; i++)
1267		if (own[i] != 0)
1268			break;
1269	if (i == NFSV4CL_LOCKNAMELEN)
1270		return (0);
1271
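	/*
	 * The owner name was filled in by nfscl_filllockowner(): the pid
	 * followed by the process start time.  Decode the pid first and then
	 * check that a process with that pid and start time still exists.
	 */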
1272	tl.cval[0] = *own++;
1273	tl.cval[1] = *own++;
1274	tl.cval[2] = *own++;
1275	tl.cval[3] = *own++;
1276	pid = tl.lval;
1277	p = pfind_locked(pid);
1278	if (p == NULL)
1279		return (1);
1280	if (p->p_stats == NULL) {
1281		PROC_UNLOCK(p);
1282		return (0);
1283	}
1284	tl.cval[0] = *own++;
1285	tl.cval[1] = *own++;
1286	tl.cval[2] = *own++;
1287	tl.cval[3] = *own++;
1288	if (tl.lval != p->p_stats->p_start.tv_sec) {
1289		ret = 1;
1290	} else {
1291		tl.cval[0] = *own++;
1292		tl.cval[1] = *own++;
1293		tl.cval[2] = *own++;
1294		tl.cval[3] = *own;
1295		if (tl.lval != p->p_stats->p_start.tv_usec)
1296			ret = 1;
1297	}
1298	PROC_UNLOCK(p);
1299	return (ret);
1300}
1301
1302/*
1303 * - nfs pseudo system call for the client
1304 */
1305/*
1306 * MPSAFE
1307 */
1308static int
1309nfssvc_nfscl(struct thread *td, struct nfssvc_args *uap)
1310{
1311	struct file *fp;
1312	struct nfscbd_args nfscbdarg;
1313	struct nfsd_nfscbd_args nfscbdarg2;
1314	struct nameidata nd;
1315	struct nfscl_dumpmntopts dumpmntopts;
1316	cap_rights_t rights;
1317	char *buf;
1318	int error;
1319	struct mount *mp;
1320	struct nfsmount *nmp;
1321
1322	if (uap->flag & NFSSVC_CBADDSOCK) {
1323		error = copyin(uap->argp, (caddr_t)&nfscbdarg, sizeof(nfscbdarg));
1324		if (error)
1325			return (error);
1326		/*
1327		 * Since we don't know what rights might be required,
1328		 * pretend that we need them all. It is better to be too
1329		 * careful than too reckless.
1330		 */
1331		error = fget(td, nfscbdarg.sock,
1332		    cap_rights_init(&rights, CAP_SOCK_CLIENT), &fp);
1333		if (error)
1334			return (error);
1335		if (fp->f_type != DTYPE_SOCKET) {
1336			fdrop(fp, td);
1337			return (EPERM);
1338		}
1339		error = nfscbd_addsock(fp);
1340		fdrop(fp, td);
1341		if (!error && nfscl_enablecallb == 0) {
1342			nfsv4_cbport = nfscbdarg.port;
1343			nfscl_enablecallb = 1;
1344		}
1345	} else if (uap->flag & NFSSVC_NFSCBD) {
1346		if (uap->argp == NULL)
1347			return (EINVAL);
1348		error = copyin(uap->argp, (caddr_t)&nfscbdarg2,
1349		    sizeof(nfscbdarg2));
1350		if (error)
1351			return (error);
1352		error = nfscbd_nfsd(td, &nfscbdarg2);
1353	} else if (uap->flag & NFSSVC_DUMPMNTOPTS) {
1354		error = copyin(uap->argp, &dumpmntopts, sizeof(dumpmntopts));
1355		if (error == 0 && (dumpmntopts.ndmnt_blen < 256 ||
1356		    dumpmntopts.ndmnt_blen > 1024))
1357			error = EINVAL;
1358		if (error == 0)
1359			error = nfsrv_lookupfilename(&nd,
1360			    dumpmntopts.ndmnt_fname, td);
1361		if (error == 0 && strcmp(nd.ni_vp->v_mount->mnt_vfc->vfc_name,
1362		    "nfs") != 0) {
1363			vput(nd.ni_vp);
1364			error = EINVAL;
1365		}
1366		if (error == 0) {
1367			buf = malloc(dumpmntopts.ndmnt_blen, M_TEMP, M_WAITOK);
1368			nfscl_retopts(VFSTONFS(nd.ni_vp->v_mount), buf,
1369			    dumpmntopts.ndmnt_blen);
1370			vput(nd.ni_vp);
1371			error = copyout(buf, dumpmntopts.ndmnt_buf,
1372			    dumpmntopts.ndmnt_blen);
1373			free(buf, M_TEMP);
1374		}
1375	} else if (uap->flag & NFSSVC_FORCEDISM) {
1376		buf = malloc(MNAMELEN + 1, M_TEMP, M_WAITOK);
1377		error = copyinstr(uap->argp, buf, MNAMELEN + 1, NULL);
1378		if (error == 0) {
1379			nmp = NULL;
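			/*
			 * Search the mount list for an NFS mount point whose
			 * f_mntonname matches the copied-in path and mark it
			 * as having a forced dismount in progress.
			 */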
1380			mtx_lock(&mountlist_mtx);
1381			TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1382				if (strcmp(mp->mnt_stat.f_mntonname, buf) ==
1383				    0 && strcmp(mp->mnt_stat.f_fstypename,
1384				    "nfs") == 0 && mp->mnt_data != NULL) {
1385					nmp = VFSTONFS(mp);
1386					mtx_lock(&nmp->nm_mtx);
1387					if ((nmp->nm_privflag &
1388					    NFSMNTP_FORCEDISM) == 0) {
1389						nmp->nm_privflag |=
1390						   (NFSMNTP_FORCEDISM |
1391						    NFSMNTP_CANCELRPCS);
1392						mtx_unlock(&nmp->nm_mtx);
1393					} else {
1394						mtx_unlock(&nmp->nm_mtx);
1395						nmp = NULL;
1396					}
1397					break;
1398				}
1399			}
1400			mtx_unlock(&mountlist_mtx);
1401
1402			if (nmp != NULL) {
1403				/*
1404				 * Call newnfs_nmcancelreqs() to cause
1405				 * any RPCs in progress on the mount point to
1406				 * fail.
1407				 * This will cause any process waiting for an
1408				 * RPC to complete while holding a vnode lock
1409				 * on the mounted-on vnode (such as "df" or
1410				 * a non-forced "umount") to fail.
1411				 * This will unlock the mounted-on vnode so
1412				 * a forced dismount can succeed.
1413				 * Then clear NFSMNTP_CANCELRPCS and wakeup(),
1414				 * so that nfs_unmount() can complete.
1415				 */
1416				newnfs_nmcancelreqs(nmp);
1417				mtx_lock(&nmp->nm_mtx);
1418				nmp->nm_privflag &= ~NFSMNTP_CANCELRPCS;
1419				wakeup(nmp);
1420				mtx_unlock(&nmp->nm_mtx);
1421			} else
1422				error = EINVAL;
1423		}
1424		free(buf, M_TEMP);
1425	} else {
1426		error = EINVAL;
1427	}
1428	return (error);
1429}
1430
1431extern int (*nfsd_call_nfscl)(struct thread *, struct nfssvc_args *);
1432
1433/*
1434 * Called once to initialize data structures...
1435 */
1436static int
1437nfscl_modevent(module_t mod, int type, void *data)
1438{
1439	int error = 0;
1440	static int loaded = 0;
1441
1442	switch (type) {
1443	case MOD_LOAD:
1444		if (loaded)
1445			return (0);
1446		newnfs_portinit();
1447		mtx_init(&ncl_iod_mutex, "ncl_iod_mutex", NULL, MTX_DEF);
1448		nfscl_init();
1449		NFSD_LOCK();
1450		nfsrvd_cbinit(0);
1451		NFSD_UNLOCK();
1452		ncl_call_invalcaches = ncl_invalcaches;
1453		nfsd_call_nfscl = nfssvc_nfscl;
1454		loaded = 1;
1455		break;
1456
1457	case MOD_UNLOAD:
1458		if (nfs_numnfscbd != 0) {
1459			error = EBUSY;
1460			break;
1461		}
1462
1463		/*
1464		 * XXX: Unloading of nfscl module is unsupported.
1465		 */
1466#if 0
1467		ncl_call_invalcaches = NULL;
1468		nfsd_call_nfscl = NULL;
1469		/* and get rid of the mutexes */
1470		mtx_destroy(&ncl_iod_mutex);
1471		loaded = 0;
1472		break;
1473#else
1474		/* FALLTHROUGH */
1475#endif
1476	default:
1477		error = EOPNOTSUPP;
1478		break;
1479	}
1480	return (error);
1481}
1482static moduledata_t nfscl_mod = {
1483	"nfscl",
1484	nfscl_modevent,
1485	NULL,
1486};
1487DECLARE_MODULE(nfscl, nfscl_mod, SI_SUB_VFS, SI_ORDER_FIRST);
1488
1489/* So that loader and kldload(2) can find us, wherever we are.. */
1490MODULE_VERSION(nfscl, 1);
1491MODULE_DEPEND(nfscl, nfscommon, 1, 1, 1);
1492MODULE_DEPEND(nfscl, krpc, 1, 1, 1);
1493MODULE_DEPEND(nfscl, nfssvc, 1, 1, 1);
1494MODULE_DEPEND(nfscl, nfslock, 1, 1, 1);
1495
1496