vfs_mount.c revision 330897
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1999-2004 Poul-Henning Kamp
5 * Copyright (c) 1999 Michael Smith
6 * Copyright (c) 1989, 1993
7 *	The Regents of the University of California.  All rights reserved.
8 * (c) UNIX System Laboratories, Inc.
9 * All or some portions of this file are derived from material licensed
10 * to the University of California by American Telephone and Telegraph
11 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12 * the permission of UNIX System Laboratories, Inc.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 *    notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 *    notice, this list of conditions and the following disclaimer in the
21 *    documentation and/or other materials provided with the distribution.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 */
38
39#include <sys/cdefs.h>
40__FBSDID("$FreeBSD: stable/11/sys/kern/vfs_mount.c 330897 2018-03-14 03:19:51Z eadler $");
41
42#include <sys/param.h>
43#include <sys/conf.h>
44#include <sys/fcntl.h>
45#include <sys/jail.h>
46#include <sys/kernel.h>
47#include <sys/libkern.h>
48#include <sys/malloc.h>
49#include <sys/mount.h>
50#include <sys/mutex.h>
51#include <sys/namei.h>
52#include <sys/priv.h>
53#include <sys/proc.h>
54#include <sys/filedesc.h>
55#include <sys/reboot.h>
56#include <sys/sbuf.h>
57#include <sys/syscallsubr.h>
58#include <sys/sysproto.h>
59#include <sys/sx.h>
60#include <sys/sysctl.h>
61#include <sys/sysent.h>
62#include <sys/systm.h>
63#include <sys/vnode.h>
64#include <vm/uma.h>
65
66#include <geom/geom.h>
67
68#include <machine/stdarg.h>
69
70#include <security/audit/audit.h>
71#include <security/mac/mac_framework.h>
72
73#define	VFS_MOUNTARG_SIZE_MAX	(1024 * 64)
74
75static int	vfs_domount(struct thread *td, const char *fstype, char *fspath,
76		    uint64_t fsflags, struct vfsoptlist **optlist);
77static void	free_mntarg(struct mntarg *ma);
78
79static int	usermount = 0;
80SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
81    "Unprivileged users may mount and unmount file systems");
82
83MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
84MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure");
85static uma_zone_t mount_zone;
86
87/* List of mounted filesystems. */
88struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
89
90/* For any iteration/modification of mountlist */
91struct mtx mountlist_mtx;
92MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF);
93
94/*
95 * Global opts, taken by all filesystems
96 */
97static const char *global_opts[] = {
98	"errmsg",
99	"fstype",
100	"fspath",
101	"ro",
102	"rw",
103	"nosuid",
104	"noexec",
105	NULL
106};
107
108static int
109mount_init(void *mem, int size, int flags)
110{
111	struct mount *mp;
112
113	mp = (struct mount *)mem;
114	mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
115	lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
116	return (0);
117}
118
119static void
120mount_fini(void *mem, int size)
121{
122	struct mount *mp;
123
124	mp = (struct mount *)mem;
125	lockdestroy(&mp->mnt_explock);
126	mtx_destroy(&mp->mnt_mtx);
127}
128
129static void
130vfs_mount_init(void *dummy __unused)
131{
132
133	mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount), NULL,
134	    NULL, mount_init, mount_fini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
135}
136SYSINIT(vfs_mount, SI_SUB_VFS, SI_ORDER_ANY, vfs_mount_init, NULL);
137
138/*
139 * ---------------------------------------------------------------------
140 * Functions for building and sanitizing the mount options
141 */
142
143/* Remove one mount option. */
144static void
145vfs_freeopt(struct vfsoptlist *opts, struct vfsopt *opt)
146{
147
148	TAILQ_REMOVE(opts, opt, link);
149	free(opt->name, M_MOUNT);
150	if (opt->value != NULL)
151		free(opt->value, M_MOUNT);
152	free(opt, M_MOUNT);
153}
154
155/* Release all resources related to the mount options. */
156void
157vfs_freeopts(struct vfsoptlist *opts)
158{
159	struct vfsopt *opt;
160
161	while (!TAILQ_EMPTY(opts)) {
162		opt = TAILQ_FIRST(opts);
163		vfs_freeopt(opts, opt);
164	}
165	free(opts, M_MOUNT);
166}
167
168void
169vfs_deleteopt(struct vfsoptlist *opts, const char *name)
170{
171	struct vfsopt *opt, *temp;
172
173	if (opts == NULL)
174		return;
175	TAILQ_FOREACH_SAFE(opt, opts, link, temp)  {
176		if (strcmp(opt->name, name) == 0)
177			vfs_freeopt(opts, opt);
178	}
179}
180
181static int
182vfs_isopt_ro(const char *opt)
183{
184
185	if (strcmp(opt, "ro") == 0 || strcmp(opt, "rdonly") == 0 ||
186	    strcmp(opt, "norw") == 0)
187		return (1);
188	return (0);
189}
190
191static int
192vfs_isopt_rw(const char *opt)
193{
194
195	if (strcmp(opt, "rw") == 0 || strcmp(opt, "noro") == 0)
196		return (1);
197	return (0);
198}
199
200/*
201 * Check if options are equal (with or without the "no" prefix).
202 */
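/*
 * For illustration, under these rules the following (example) option
 * names compare equal, so vfs_sanitizeopts() below keeps only the last
 * of each such pair:
 *
 *	vfs_equalopts("atime", "noatime")	-> 1
 *	vfs_equalopts("ufs.acls", "ufs.noacls")	-> 1
 *	vfs_equalopts("ro", "rw")		-> 1
 *	vfs_equalopts("noexec", "nosuid")	-> 0
 */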
203static int
204vfs_equalopts(const char *opt1, const char *opt2)
205{
206	char *p;
207
208	/* "opt" vs. "opt" or "noopt" vs. "noopt" */
209	if (strcmp(opt1, opt2) == 0)
210		return (1);
211	/* "noopt" vs. "opt" */
212	if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
213		return (1);
214	/* "opt" vs. "noopt" */
215	if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
216		return (1);
217	while ((p = strchr(opt1, '.')) != NULL &&
218	    !strncmp(opt1, opt2, ++p - opt1)) {
219		opt2 += p - opt1;
220		opt1 = p;
221		/* "foo.noopt" vs. "foo.opt" */
222		if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
223			return (1);
224		/* "foo.opt" vs. "foo.noopt" */
225		if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
226			return (1);
227	}
228	/* "ro" / "rdonly" / "norw" / "rw" / "noro" */
229	if ((vfs_isopt_ro(opt1) || vfs_isopt_rw(opt1)) &&
230	    (vfs_isopt_ro(opt2) || vfs_isopt_rw(opt2)))
231		return (1);
232	return (0);
233}
234
235/*
 * If a mount option is specified several times
 * (with or without the "no" prefix), only keep
 * the last occurrence of it.
239 */
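/*
 * For example, if the caller supplied "ro", "noatime", "rw" and "atime"
 * in that order, only "rw" and "atime" survive: the earlier "ro" and
 * "noatime" are dropped in favor of the later, equivalent options.
 */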
240static void
241vfs_sanitizeopts(struct vfsoptlist *opts)
242{
243	struct vfsopt *opt, *opt2, *tmp;
244
245	TAILQ_FOREACH_REVERSE(opt, opts, vfsoptlist, link) {
246		opt2 = TAILQ_PREV(opt, vfsoptlist, link);
247		while (opt2 != NULL) {
248			if (vfs_equalopts(opt->name, opt2->name)) {
249				tmp = TAILQ_PREV(opt2, vfsoptlist, link);
250				vfs_freeopt(opts, opt2);
251				opt2 = tmp;
252			} else {
253				opt2 = TAILQ_PREV(opt2, vfsoptlist, link);
254			}
255		}
256	}
257}
258
259/*
260 * Build a linked list of mount options from a struct uio.
261 */
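/*
 * The iovecs are consumed in pairs: each even-numbered iovec carries a
 * NUL-terminated option name and the following odd-numbered iovec
 * carries its value (a zero-length value leaves opt->value NULL, which
 * is how boolean options such as "ro" are normally passed).
 */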
262int
263vfs_buildopts(struct uio *auio, struct vfsoptlist **options)
264{
265	struct vfsoptlist *opts;
266	struct vfsopt *opt;
267	size_t memused, namelen, optlen;
268	unsigned int i, iovcnt;
269	int error;
270
271	opts = malloc(sizeof(struct vfsoptlist), M_MOUNT, M_WAITOK);
272	TAILQ_INIT(opts);
273	memused = 0;
274	iovcnt = auio->uio_iovcnt;
275	for (i = 0; i < iovcnt; i += 2) {
276		namelen = auio->uio_iov[i].iov_len;
277		optlen = auio->uio_iov[i + 1].iov_len;
278		memused += sizeof(struct vfsopt) + optlen + namelen;
279		/*
		 * Avoid consuming too much memory, and guard against
		 * attempts to overflow memused.
282		 */
283		if (memused > VFS_MOUNTARG_SIZE_MAX ||
284		    optlen > VFS_MOUNTARG_SIZE_MAX ||
285		    namelen > VFS_MOUNTARG_SIZE_MAX) {
286			error = EINVAL;
287			goto bad;
288		}
289
290		opt = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
291		opt->name = malloc(namelen, M_MOUNT, M_WAITOK);
292		opt->value = NULL;
293		opt->len = 0;
294		opt->pos = i / 2;
295		opt->seen = 0;
296
297		/*
298		 * Do this early, so jumps to "bad" will free the current
299		 * option.
300		 */
301		TAILQ_INSERT_TAIL(opts, opt, link);
302
303		if (auio->uio_segflg == UIO_SYSSPACE) {
304			bcopy(auio->uio_iov[i].iov_base, opt->name, namelen);
305		} else {
306			error = copyin(auio->uio_iov[i].iov_base, opt->name,
307			    namelen);
308			if (error)
309				goto bad;
310		}
311		/* Ensure names are null-terminated strings. */
312		if (namelen == 0 || opt->name[namelen - 1] != '\0') {
313			error = EINVAL;
314			goto bad;
315		}
316		if (optlen != 0) {
317			opt->len = optlen;
318			opt->value = malloc(optlen, M_MOUNT, M_WAITOK);
319			if (auio->uio_segflg == UIO_SYSSPACE) {
320				bcopy(auio->uio_iov[i + 1].iov_base, opt->value,
321				    optlen);
322			} else {
323				error = copyin(auio->uio_iov[i + 1].iov_base,
324				    opt->value, optlen);
325				if (error)
326					goto bad;
327			}
328		}
329	}
330	vfs_sanitizeopts(opts);
331	*options = opts;
332	return (0);
333bad:
334	vfs_freeopts(opts);
335	return (error);
336}
337
338/*
339 * Merge the old mount options with the new ones passed
340 * in the MNT_UPDATE case.
341 *
342 * XXX: This function will keep a "nofoo" option in the new
 * options.  E.g., if the option's canonical name is "foo",
344 * "nofoo" ends up in the mount point's active options.
345 */
346static void
347vfs_mergeopts(struct vfsoptlist *toopts, struct vfsoptlist *oldopts)
348{
349	struct vfsopt *opt, *new;
350
351	TAILQ_FOREACH(opt, oldopts, link) {
352		new = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
353		new->name = strdup(opt->name, M_MOUNT);
354		if (opt->len != 0) {
355			new->value = malloc(opt->len, M_MOUNT, M_WAITOK);
356			bcopy(opt->value, new->value, opt->len);
357		} else
358			new->value = NULL;
359		new->len = opt->len;
360		new->seen = opt->seen;
361		TAILQ_INSERT_HEAD(toopts, new, link);
362	}
363	vfs_sanitizeopts(toopts);
364}
365
366/*
367 * Mount a filesystem.
368 */
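/*
 * A userland caller builds the same name/value pairs; a minimal sketch
 * of an nmount(2) invocation (hypothetical mount point, error handling
 * omitted) looks like:
 *
 *	struct iovec iov[4];
 *
 *	iov[0].iov_base = "fstype"; iov[0].iov_len = sizeof("fstype");
 *	iov[1].iov_base = "tmpfs";  iov[1].iov_len = sizeof("tmpfs");
 *	iov[2].iov_base = "fspath"; iov[2].iov_len = sizeof("fspath");
 *	iov[3].iov_base = "/mnt";   iov[3].iov_len = sizeof("/mnt");
 *	nmount(iov, 4, 0);
 */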
369int
370sys_nmount(td, uap)
371	struct thread *td;
372	struct nmount_args /* {
373		struct iovec *iovp;
374		unsigned int iovcnt;
375		int flags;
376	} */ *uap;
377{
378	struct uio *auio;
379	int error;
380	u_int iovcnt;
381	uint64_t flags;
382
383	/*
	 * Mount flags are now 64-bits. On 32-bit architectures only
385	 * 32-bits are passed in, but from here on everything handles
386	 * 64-bit flags correctly.
387	 */
388	flags = uap->flags;
389
390	AUDIT_ARG_FFLAGS(flags);
391	CTR4(KTR_VFS, "%s: iovp %p with iovcnt %d and flags %d", __func__,
392	    uap->iovp, uap->iovcnt, flags);
393
394	/*
395	 * Filter out MNT_ROOTFS.  We do not want clients of nmount() in
396	 * userspace to set this flag, but we must filter it out if we want
397	 * MNT_UPDATE on the root file system to work.
398	 * MNT_ROOTFS should only be set by the kernel when mounting its
399	 * root file system.
400	 */
401	flags &= ~MNT_ROOTFS;
402
403	iovcnt = uap->iovcnt;
404	/*
	 * Check that we have an even number of iovecs
406	 * and that we have at least two options.
407	 */
408	if ((iovcnt & 1) || (iovcnt < 4)) {
409		CTR2(KTR_VFS, "%s: failed for invalid iovcnt %d", __func__,
410		    uap->iovcnt);
411		return (EINVAL);
412	}
413
414	error = copyinuio(uap->iovp, iovcnt, &auio);
415	if (error) {
416		CTR2(KTR_VFS, "%s: failed for invalid uio op with %d errno",
417		    __func__, error);
418		return (error);
419	}
420	error = vfs_donmount(td, flags, auio);
421
422	free(auio, M_IOV);
423	return (error);
424}
425
426/*
427 * ---------------------------------------------------------------------
428 * Various utility functions
429 */
430
431void
432vfs_ref(struct mount *mp)
433{
434
435	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
436	MNT_ILOCK(mp);
437	MNT_REF(mp);
438	MNT_IUNLOCK(mp);
439}
440
441void
442vfs_rel(struct mount *mp)
443{
444
445	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
446	MNT_ILOCK(mp);
447	MNT_REL(mp);
448	MNT_IUNLOCK(mp);
449}
450
451/*
452 * Allocate and initialize the mount point struct.
453 */
454struct mount *
455vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
456    struct ucred *cred)
457{
458	struct mount *mp;
459
460	mp = uma_zalloc(mount_zone, M_WAITOK);
461	bzero(&mp->mnt_startzero,
462	    __rangeof(struct mount, mnt_startzero, mnt_endzero));
463	TAILQ_INIT(&mp->mnt_nvnodelist);
464	mp->mnt_nvnodelistsize = 0;
465	TAILQ_INIT(&mp->mnt_activevnodelist);
466	mp->mnt_activevnodelistsize = 0;
467	mp->mnt_ref = 0;
468	(void) vfs_busy(mp, MBF_NOWAIT);
469	atomic_add_acq_int(&vfsp->vfc_refcount, 1);
470	mp->mnt_op = vfsp->vfc_vfsops;
471	mp->mnt_vfc = vfsp;
472	mp->mnt_stat.f_type = vfsp->vfc_typenum;
473	mp->mnt_gen++;
474	strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
475	mp->mnt_vnodecovered = vp;
476	mp->mnt_cred = crdup(cred);
477	mp->mnt_stat.f_owner = cred->cr_uid;
478	strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
479	mp->mnt_iosize_max = DFLTPHYS;
480#ifdef MAC
481	mac_mount_init(mp);
482	mac_mount_create(cred, mp);
483#endif
484	arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
485	TAILQ_INIT(&mp->mnt_uppers);
486	return (mp);
487}
488
489/*
490 * Destroy the mount struct previously allocated by vfs_mount_alloc().
491 */
492void
493vfs_mount_destroy(struct mount *mp)
494{
495
496	MNT_ILOCK(mp);
497	mp->mnt_kern_flag |= MNTK_REFEXPIRE;
498	if (mp->mnt_kern_flag & MNTK_MWAIT) {
499		mp->mnt_kern_flag &= ~MNTK_MWAIT;
500		wakeup(mp);
501	}
502	while (mp->mnt_ref)
503		msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0);
504	KASSERT(mp->mnt_ref == 0,
505	    ("%s: invalid refcount in the drain path @ %s:%d", __func__,
506	    __FILE__, __LINE__));
507	if (mp->mnt_writeopcount != 0)
508		panic("vfs_mount_destroy: nonzero writeopcount");
509	if (mp->mnt_secondary_writes != 0)
510		panic("vfs_mount_destroy: nonzero secondary_writes");
511	atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
512	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
513		struct vnode *vp;
514
515		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
516			vn_printf(vp, "dangling vnode ");
517		panic("unmount: dangling vnode");
518	}
519	KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers"));
520	if (mp->mnt_nvnodelistsize != 0)
521		panic("vfs_mount_destroy: nonzero nvnodelistsize");
522	if (mp->mnt_activevnodelistsize != 0)
523		panic("vfs_mount_destroy: nonzero activevnodelistsize");
524	if (mp->mnt_lockref != 0)
525		panic("vfs_mount_destroy: nonzero lock refcount");
526	MNT_IUNLOCK(mp);
527	if (mp->mnt_vnodecovered != NULL)
528		vrele(mp->mnt_vnodecovered);
529#ifdef MAC
530	mac_mount_destroy(mp);
531#endif
532	if (mp->mnt_opt != NULL)
533		vfs_freeopts(mp->mnt_opt);
534	crfree(mp->mnt_cred);
535	uma_zfree(mount_zone, mp);
536}
537
538int
539vfs_donmount(struct thread *td, uint64_t fsflags, struct uio *fsoptions)
540{
541	struct vfsoptlist *optlist;
542	struct vfsopt *opt, *tmp_opt;
543	char *fstype, *fspath, *errmsg;
544	int error, fstypelen, fspathlen, errmsg_len, errmsg_pos;
545
546	errmsg = fspath = NULL;
547	errmsg_len = fspathlen = 0;
548	errmsg_pos = -1;
549
550	error = vfs_buildopts(fsoptions, &optlist);
551	if (error)
552		return (error);
553
554	if (vfs_getopt(optlist, "errmsg", (void **)&errmsg, &errmsg_len) == 0)
555		errmsg_pos = vfs_getopt_pos(optlist, "errmsg");
556
557	/*
558	 * We need these two options before the others,
559	 * and they are mandatory for any filesystem.
560	 * Ensure they are NUL terminated as well.
561	 */
562	fstypelen = 0;
563	error = vfs_getopt(optlist, "fstype", (void **)&fstype, &fstypelen);
564	if (error || fstype[fstypelen - 1] != '\0') {
565		error = EINVAL;
566		if (errmsg != NULL)
567			strncpy(errmsg, "Invalid fstype", errmsg_len);
568		goto bail;
569	}
570	fspathlen = 0;
571	error = vfs_getopt(optlist, "fspath", (void **)&fspath, &fspathlen);
572	if (error || fspath[fspathlen - 1] != '\0') {
573		error = EINVAL;
574		if (errmsg != NULL)
575			strncpy(errmsg, "Invalid fspath", errmsg_len);
576		goto bail;
577	}
578
579	/*
580	 * We need to see if we have the "update" option
581	 * before we call vfs_domount(), since vfs_domount() has special
582	 * logic based on MNT_UPDATE.  This is very important
583	 * when we want to update the root filesystem.
584	 */
585	TAILQ_FOREACH_SAFE(opt, optlist, link, tmp_opt) {
586		if (strcmp(opt->name, "update") == 0) {
587			fsflags |= MNT_UPDATE;
588			vfs_freeopt(optlist, opt);
589		}
590		else if (strcmp(opt->name, "async") == 0)
591			fsflags |= MNT_ASYNC;
592		else if (strcmp(opt->name, "force") == 0) {
593			fsflags |= MNT_FORCE;
594			vfs_freeopt(optlist, opt);
595		}
596		else if (strcmp(opt->name, "reload") == 0) {
597			fsflags |= MNT_RELOAD;
598			vfs_freeopt(optlist, opt);
599		}
600		else if (strcmp(opt->name, "multilabel") == 0)
601			fsflags |= MNT_MULTILABEL;
602		else if (strcmp(opt->name, "noasync") == 0)
603			fsflags &= ~MNT_ASYNC;
604		else if (strcmp(opt->name, "noatime") == 0)
605			fsflags |= MNT_NOATIME;
606		else if (strcmp(opt->name, "atime") == 0) {
607			free(opt->name, M_MOUNT);
608			opt->name = strdup("nonoatime", M_MOUNT);
609		}
610		else if (strcmp(opt->name, "noclusterr") == 0)
611			fsflags |= MNT_NOCLUSTERR;
612		else if (strcmp(opt->name, "clusterr") == 0) {
613			free(opt->name, M_MOUNT);
614			opt->name = strdup("nonoclusterr", M_MOUNT);
615		}
616		else if (strcmp(opt->name, "noclusterw") == 0)
617			fsflags |= MNT_NOCLUSTERW;
618		else if (strcmp(opt->name, "clusterw") == 0) {
619			free(opt->name, M_MOUNT);
620			opt->name = strdup("nonoclusterw", M_MOUNT);
621		}
622		else if (strcmp(opt->name, "noexec") == 0)
623			fsflags |= MNT_NOEXEC;
624		else if (strcmp(opt->name, "exec") == 0) {
625			free(opt->name, M_MOUNT);
626			opt->name = strdup("nonoexec", M_MOUNT);
627		}
628		else if (strcmp(opt->name, "nosuid") == 0)
629			fsflags |= MNT_NOSUID;
630		else if (strcmp(opt->name, "suid") == 0) {
631			free(opt->name, M_MOUNT);
632			opt->name = strdup("nonosuid", M_MOUNT);
633		}
634		else if (strcmp(opt->name, "nosymfollow") == 0)
635			fsflags |= MNT_NOSYMFOLLOW;
636		else if (strcmp(opt->name, "symfollow") == 0) {
637			free(opt->name, M_MOUNT);
638			opt->name = strdup("nonosymfollow", M_MOUNT);
639		}
640		else if (strcmp(opt->name, "noro") == 0)
641			fsflags &= ~MNT_RDONLY;
642		else if (strcmp(opt->name, "rw") == 0)
643			fsflags &= ~MNT_RDONLY;
644		else if (strcmp(opt->name, "ro") == 0)
645			fsflags |= MNT_RDONLY;
646		else if (strcmp(opt->name, "rdonly") == 0) {
647			free(opt->name, M_MOUNT);
648			opt->name = strdup("ro", M_MOUNT);
649			fsflags |= MNT_RDONLY;
650		}
651		else if (strcmp(opt->name, "suiddir") == 0)
652			fsflags |= MNT_SUIDDIR;
653		else if (strcmp(opt->name, "sync") == 0)
654			fsflags |= MNT_SYNCHRONOUS;
655		else if (strcmp(opt->name, "union") == 0)
656			fsflags |= MNT_UNION;
657		else if (strcmp(opt->name, "automounted") == 0) {
658			fsflags |= MNT_AUTOMOUNTED;
659			vfs_freeopt(optlist, opt);
660		}
661	}
662
663	/*
664	 * Be ultra-paranoid about making sure the type and fspath
665	 * variables will fit in our mp buffers, including the
666	 * terminating NUL.
667	 */
668	if (fstypelen > MFSNAMELEN || fspathlen > MNAMELEN) {
669		error = ENAMETOOLONG;
670		goto bail;
671	}
672
673	error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
674bail:
675	/* copyout the errmsg */
676	if (errmsg_pos != -1 && ((2 * errmsg_pos + 1) < fsoptions->uio_iovcnt)
677	    && errmsg_len > 0 && errmsg != NULL) {
678		if (fsoptions->uio_segflg == UIO_SYSSPACE) {
679			bcopy(errmsg,
680			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
681			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
682		} else {
683			copyout(errmsg,
684			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
685			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
686		}
687	}
688
689	if (optlist != NULL)
690		vfs_freeopts(optlist);
691	return (error);
692}
693
694/*
695 * Old mount API.
696 */
697#ifndef _SYS_SYSPROTO_H_
698struct mount_args {
699	char	*type;
700	char	*path;
701	int	flags;
702	caddr_t	data;
703};
704#endif
705/* ARGSUSED */
706int
707sys_mount(td, uap)
708	struct thread *td;
709	struct mount_args /* {
710		char *type;
711		char *path;
712		int flags;
713		caddr_t data;
714	} */ *uap;
715{
716	char *fstype;
717	struct vfsconf *vfsp = NULL;
718	struct mntarg *ma = NULL;
719	uint64_t flags;
720	int error;
721
722	/*
723	 * Mount flags are now 64-bits. On 32-bit architectures only
724	 * 32-bits are passed in, but from here on everything handles
725	 * 64-bit flags correctly.
726	 */
727	flags = uap->flags;
728
729	AUDIT_ARG_FFLAGS(flags);
730
731	/*
732	 * Filter out MNT_ROOTFS.  We do not want clients of mount() in
733	 * userspace to set this flag, but we must filter it out if we want
734	 * MNT_UPDATE on the root file system to work.
735	 * MNT_ROOTFS should only be set by the kernel when mounting its
736	 * root file system.
737	 */
738	flags &= ~MNT_ROOTFS;
739
740	fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK);
741	error = copyinstr(uap->type, fstype, MFSNAMELEN, NULL);
742	if (error) {
743		free(fstype, M_TEMP);
744		return (error);
745	}
746
747	AUDIT_ARG_TEXT(fstype);
748	vfsp = vfs_byname_kld(fstype, td, &error);
749	free(fstype, M_TEMP);
750	if (vfsp == NULL)
751		return (ENOENT);
752	if (vfsp->vfc_vfsops->vfs_cmount == NULL)
753		return (EOPNOTSUPP);
754
755	ma = mount_argsu(ma, "fstype", uap->type, MFSNAMELEN);
756	ma = mount_argsu(ma, "fspath", uap->path, MNAMELEN);
757	ma = mount_argb(ma, flags & MNT_RDONLY, "noro");
758	ma = mount_argb(ma, !(flags & MNT_NOSUID), "nosuid");
759	ma = mount_argb(ma, !(flags & MNT_NOEXEC), "noexec");
760
761	error = vfsp->vfc_vfsops->vfs_cmount(ma, uap->data, flags);
762	return (error);
763}
764
765/*
766 * vfs_domount_first(): first file system mount (not update)
767 */
768static int
769vfs_domount_first(
770	struct thread *td,		/* Calling thread. */
771	struct vfsconf *vfsp,		/* File system type. */
772	char *fspath,			/* Mount path. */
773	struct vnode *vp,		/* Vnode to be covered. */
774	uint64_t fsflags,		/* Flags common to all filesystems. */
775	struct vfsoptlist **optlist	/* Options local to the filesystem. */
776	)
777{
778	struct vattr va;
779	struct mount *mp;
780	struct vnode *newdp;
781	int error;
782
783	ASSERT_VOP_ELOCKED(vp, __func__);
784	KASSERT((fsflags & MNT_UPDATE) == 0, ("MNT_UPDATE shouldn't be here"));
785
786	/*
787	 * If the user is not root, ensure that they own the directory
788	 * onto which we are attempting to mount.
789	 */
790	error = VOP_GETATTR(vp, &va, td->td_ucred);
791	if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
792		error = priv_check_cred(td->td_ucred, PRIV_VFS_ADMIN, 0);
793	if (error == 0)
794		error = vinvalbuf(vp, V_SAVE, 0, 0);
795	if (error == 0 && vp->v_type != VDIR)
796		error = ENOTDIR;
797	if (error == 0) {
798		VI_LOCK(vp);
799		if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
800			vp->v_iflag |= VI_MOUNT;
801		else
802			error = EBUSY;
803		VI_UNLOCK(vp);
804	}
805	if (error != 0) {
806		vput(vp);
807		return (error);
808	}
809	VOP_UNLOCK(vp, 0);
810
811	/* Allocate and initialize the filesystem. */
812	mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
813	/* XXXMAC: pass to vfs_mount_alloc? */
814	mp->mnt_optnew = *optlist;
815	/* Set the mount level flags. */
816	mp->mnt_flag = (fsflags & (MNT_UPDATEMASK | MNT_ROOTFS | MNT_RDONLY));
817
818	/*
819	 * Mount the filesystem.
820	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
821	 * get.  No freeing of cn_pnbuf.
822	 */
823	error = VFS_MOUNT(mp);
824	if (error != 0) {
825		vfs_unbusy(mp);
826		mp->mnt_vnodecovered = NULL;
827		vfs_mount_destroy(mp);
828		VI_LOCK(vp);
829		vp->v_iflag &= ~VI_MOUNT;
830		VI_UNLOCK(vp);
831		vrele(vp);
832		return (error);
833	}
834
835	if (mp->mnt_opt != NULL)
836		vfs_freeopts(mp->mnt_opt);
837	mp->mnt_opt = mp->mnt_optnew;
838	*optlist = NULL;
839	(void)VFS_STATFS(mp, &mp->mnt_stat);
840
841	/*
842	 * Prevent external consumers of mount options from reading mnt_optnew.
843	 */
844	mp->mnt_optnew = NULL;
845
846	MNT_ILOCK(mp);
847	if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
848	    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
849		mp->mnt_kern_flag |= MNTK_ASYNC;
850	else
851		mp->mnt_kern_flag &= ~MNTK_ASYNC;
852	MNT_IUNLOCK(mp);
853
854	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
855	cache_purge(vp);
856	VI_LOCK(vp);
857	vp->v_iflag &= ~VI_MOUNT;
858	VI_UNLOCK(vp);
859	vp->v_mountedhere = mp;
860	/* Place the new filesystem at the end of the mount list. */
861	mtx_lock(&mountlist_mtx);
862	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
863	mtx_unlock(&mountlist_mtx);
864	vfs_event_signal(NULL, VQ_MOUNT, 0);
865	if (VFS_ROOT(mp, LK_EXCLUSIVE, &newdp))
866		panic("mount: lost mount");
867	VOP_UNLOCK(vp, 0);
868	EVENTHANDLER_INVOKE(vfs_mounted, mp, newdp, td);
869	VOP_UNLOCK(newdp, 0);
870	mountcheckdirs(vp, newdp);
871	vrele(newdp);
872	if ((mp->mnt_flag & MNT_RDONLY) == 0)
873		vfs_allocate_syncvnode(mp);
874	vfs_unbusy(mp);
875	return (0);
876}
877
878/*
879 * vfs_domount_update(): update of mounted file system
880 */
881static int
882vfs_domount_update(
883	struct thread *td,		/* Calling thread. */
884	struct vnode *vp,		/* Mount point vnode. */
885	uint64_t fsflags,		/* Flags common to all filesystems. */
886	struct vfsoptlist **optlist	/* Options local to the filesystem. */
887	)
888{
889	struct export_args export;
890	void *bufp;
891	struct mount *mp;
892	int error, export_error, len;
893	uint64_t flag;
894
895	ASSERT_VOP_ELOCKED(vp, __func__);
896	KASSERT((fsflags & MNT_UPDATE) != 0, ("MNT_UPDATE should be here"));
897	mp = vp->v_mount;
898
899	if ((vp->v_vflag & VV_ROOT) == 0) {
900		if (vfs_copyopt(*optlist, "export", &export, sizeof(export))
901		    == 0)
902			error = EXDEV;
903		else
904			error = EINVAL;
905		vput(vp);
906		return (error);
907	}
908
909	/*
910	 * We only allow the filesystem to be reloaded if it
911	 * is currently mounted read-only.
912	 */
913	flag = mp->mnt_flag;
914	if ((fsflags & MNT_RELOAD) != 0 && (flag & MNT_RDONLY) == 0) {
915		vput(vp);
916		return (EOPNOTSUPP);	/* Needs translation */
917	}
918	/*
919	 * Only privileged root, or (if MNT_USER is set) the user that
920	 * did the original mount is permitted to update it.
921	 */
922	error = vfs_suser(mp, td);
923	if (error != 0) {
924		vput(vp);
925		return (error);
926	}
927	if (vfs_busy(mp, MBF_NOWAIT)) {
928		vput(vp);
929		return (EBUSY);
930	}
931	VI_LOCK(vp);
932	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
933		VI_UNLOCK(vp);
934		vfs_unbusy(mp);
935		vput(vp);
936		return (EBUSY);
937	}
938	vp->v_iflag |= VI_MOUNT;
939	VI_UNLOCK(vp);
940	VOP_UNLOCK(vp, 0);
941
942	MNT_ILOCK(mp);
943	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
944		MNT_IUNLOCK(mp);
945		error = EBUSY;
946		goto end;
947	}
948	mp->mnt_flag &= ~MNT_UPDATEMASK;
949	mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
950	    MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
951	if ((mp->mnt_flag & MNT_ASYNC) == 0)
952		mp->mnt_kern_flag &= ~MNTK_ASYNC;
953	MNT_IUNLOCK(mp);
954	mp->mnt_optnew = *optlist;
955	vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);
956
957	/*
958	 * Mount the filesystem.
959	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
960	 * get.  No freeing of cn_pnbuf.
961	 */
962	error = VFS_MOUNT(mp);
963
964	export_error = 0;
965	/* Process the export option. */
966	if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp,
967	    &len) == 0) {
968		/* Assume that there is only 1 ABI for each length. */
969		switch (len) {
970		case (sizeof(struct oexport_args)):
971			bzero(&export, sizeof(export));
972			/* FALLTHROUGH */
973		case (sizeof(export)):
974			bcopy(bufp, &export, len);
975			export_error = vfs_export(mp, &export);
976			break;
977		default:
978			export_error = EINVAL;
979			break;
980		}
981	}
982
983	MNT_ILOCK(mp);
984	if (error == 0) {
985		mp->mnt_flag &=	~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
986		    MNT_SNAPSHOT);
987	} else {
988		/*
989		 * If we fail, restore old mount flags. MNT_QUOTA is special,
990		 * because it is not part of MNT_UPDATEMASK, but it could have
991		 * changed in the meantime if quotactl(2) was called.
992		 * All in all we want current value of MNT_QUOTA, not the old
993		 * one.
994		 */
995		mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
996	}
997	if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
998	    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
999		mp->mnt_kern_flag |= MNTK_ASYNC;
1000	else
1001		mp->mnt_kern_flag &= ~MNTK_ASYNC;
1002	MNT_IUNLOCK(mp);
1003
1004	if (error != 0)
1005		goto end;
1006
1007	if (mp->mnt_opt != NULL)
1008		vfs_freeopts(mp->mnt_opt);
1009	mp->mnt_opt = mp->mnt_optnew;
1010	*optlist = NULL;
1011	(void)VFS_STATFS(mp, &mp->mnt_stat);
1012	/*
1013	 * Prevent external consumers of mount options from reading
1014	 * mnt_optnew.
1015	 */
1016	mp->mnt_optnew = NULL;
1017
1018	if ((mp->mnt_flag & MNT_RDONLY) == 0)
1019		vfs_allocate_syncvnode(mp);
1020	else
1021		vfs_deallocate_syncvnode(mp);
1022end:
1023	vfs_unbusy(mp);
1024	VI_LOCK(vp);
1025	vp->v_iflag &= ~VI_MOUNT;
1026	VI_UNLOCK(vp);
1027	vrele(vp);
1028	return (error != 0 ? error : export_error);
1029}
1030
1031/*
1032 * vfs_domount(): actually attempt a filesystem mount.
1033 */
1034static int
1035vfs_domount(
1036	struct thread *td,		/* Calling thread. */
1037	const char *fstype,		/* Filesystem type. */
1038	char *fspath,			/* Mount path. */
1039	uint64_t fsflags,		/* Flags common to all filesystems. */
1040	struct vfsoptlist **optlist	/* Options local to the filesystem. */
1041	)
1042{
1043	struct vfsconf *vfsp;
1044	struct nameidata nd;
1045	struct vnode *vp;
1046	char *pathbuf;
1047	int error;
1048
1049	/*
1050	 * Be ultra-paranoid about making sure the type and fspath
1051	 * variables will fit in our mp buffers, including the
1052	 * terminating NUL.
1053	 */
1054	if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
1055		return (ENAMETOOLONG);
1056
1057	if (jailed(td->td_ucred) || usermount == 0) {
1058		if ((error = priv_check(td, PRIV_VFS_MOUNT)) != 0)
1059			return (error);
1060	}
1061
1062	/*
1063	 * Do not allow NFS export or MNT_SUIDDIR by unprivileged users.
1064	 */
1065	if (fsflags & MNT_EXPORTED) {
1066		error = priv_check(td, PRIV_VFS_MOUNT_EXPORTED);
1067		if (error)
1068			return (error);
1069	}
1070	if (fsflags & MNT_SUIDDIR) {
1071		error = priv_check(td, PRIV_VFS_MOUNT_SUIDDIR);
1072		if (error)
1073			return (error);
1074	}
1075	/*
1076	 * Silently enforce MNT_NOSUID and MNT_USER for unprivileged users.
1077	 */
1078	if ((fsflags & (MNT_NOSUID | MNT_USER)) != (MNT_NOSUID | MNT_USER)) {
1079		if (priv_check(td, PRIV_VFS_MOUNT_NONUSER) != 0)
1080			fsflags |= MNT_NOSUID | MNT_USER;
1081	}
1082
1083	/* Load KLDs before we lock the covered vnode to avoid reversals. */
1084	vfsp = NULL;
1085	if ((fsflags & MNT_UPDATE) == 0) {
1086		/* Don't try to load KLDs if we're mounting the root. */
1087		if (fsflags & MNT_ROOTFS)
1088			vfsp = vfs_byname(fstype);
1089		else
1090			vfsp = vfs_byname_kld(fstype, td, &error);
1091		if (vfsp == NULL)
1092			return (ENODEV);
1093		if (jailed(td->td_ucred) && !(vfsp->vfc_flags & VFCF_JAIL))
1094			return (EPERM);
1095	}
1096
1097	/*
	 * Get the vnode to be covered, or the mount point's vnode in the
	 * MNT_UPDATE case.
1099	 */
1100	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1101	    UIO_SYSSPACE, fspath, td);
1102	error = namei(&nd);
1103	if (error != 0)
1104		return (error);
1105	NDFREE(&nd, NDF_ONLY_PNBUF);
1106	vp = nd.ni_vp;
1107	if ((fsflags & MNT_UPDATE) == 0) {
1108		pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
1109		strcpy(pathbuf, fspath);
1110		error = vn_path_to_global_path(td, vp, pathbuf, MNAMELEN);
1111		/* debug.disablefullpath == 1 results in ENODEV */
1112		if (error == 0 || error == ENODEV) {
1113			error = vfs_domount_first(td, vfsp, pathbuf, vp,
1114			    fsflags, optlist);
1115		}
1116		free(pathbuf, M_TEMP);
1117	} else
1118		error = vfs_domount_update(td, vp, fsflags, optlist);
1119
1120	return (error);
1121}
1122
1123/*
1124 * Unmount a filesystem.
1125 *
1126 * Note: unmount takes a path to the vnode mounted on as argument, not
1127 * special file (as before).
1128 */
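/*
 * With MNT_BYFSID the path argument is not a path at all but a
 * filesystem ID spelled "FSID:<val0>:<val1>", matching the decoding
 * below; e.g. a (hypothetical) "FSID:1234:5678" selects the mount
 * whose f_fsid holds those two values.
 */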
1129#ifndef _SYS_SYSPROTO_H_
1130struct unmount_args {
1131	char	*path;
1132	int	flags;
1133};
1134#endif
1135/* ARGSUSED */
1136int
1137sys_unmount(struct thread *td, struct unmount_args *uap)
1138{
1139	struct nameidata nd;
1140	struct mount *mp;
1141	char *pathbuf;
1142	int error, id0, id1;
1143
1144	AUDIT_ARG_VALUE(uap->flags);
1145	if (jailed(td->td_ucred) || usermount == 0) {
1146		error = priv_check(td, PRIV_VFS_UNMOUNT);
1147		if (error)
1148			return (error);
1149	}
1150
1151	pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
1152	error = copyinstr(uap->path, pathbuf, MNAMELEN, NULL);
1153	if (error) {
1154		free(pathbuf, M_TEMP);
1155		return (error);
1156	}
1157	if (uap->flags & MNT_BYFSID) {
1158		AUDIT_ARG_TEXT(pathbuf);
1159		/* Decode the filesystem ID. */
1160		if (sscanf(pathbuf, "FSID:%d:%d", &id0, &id1) != 2) {
1161			free(pathbuf, M_TEMP);
1162			return (EINVAL);
1163		}
1164
1165		mtx_lock(&mountlist_mtx);
1166		TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1167			if (mp->mnt_stat.f_fsid.val[0] == id0 &&
1168			    mp->mnt_stat.f_fsid.val[1] == id1) {
1169				vfs_ref(mp);
1170				break;
1171			}
1172		}
1173		mtx_unlock(&mountlist_mtx);
1174	} else {
1175		/*
1176		 * Try to find global path for path argument.
1177		 */
1178		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
1179		    UIO_SYSSPACE, pathbuf, td);
1180		if (namei(&nd) == 0) {
1181			NDFREE(&nd, NDF_ONLY_PNBUF);
1182			error = vn_path_to_global_path(td, nd.ni_vp, pathbuf,
1183			    MNAMELEN);
1184			if (error == 0 || error == ENODEV)
1185				vput(nd.ni_vp);
1186		}
1187		mtx_lock(&mountlist_mtx);
1188		TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
1189			if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
1190				vfs_ref(mp);
1191				break;
1192			}
1193		}
1194		mtx_unlock(&mountlist_mtx);
1195	}
1196	free(pathbuf, M_TEMP);
1197	if (mp == NULL) {
1198		/*
1199		 * Previously we returned ENOENT for a nonexistent path and
1200		 * EINVAL for a non-mountpoint.  We cannot tell these apart
1201		 * now, so in the !MNT_BYFSID case return the more likely
1202		 * EINVAL for compatibility.
1203		 */
1204		return ((uap->flags & MNT_BYFSID) ? ENOENT : EINVAL);
1205	}
1206
1207	/*
1208	 * Don't allow unmounting the root filesystem.
1209	 */
1210	if (mp->mnt_flag & MNT_ROOTFS) {
1211		vfs_rel(mp);
1212		return (EINVAL);
1213	}
1214	error = dounmount(mp, uap->flags, td);
1215	return (error);
1216}
1217
1218/*
 * Return an error if any of the vnodes, ignoring the root vnode
 * and the syncer vnode, has a non-zero usecount.
1221 *
1222 * This function is purely advisory - it can return false positives
1223 * and negatives.
1224 */
1225static int
1226vfs_check_usecounts(struct mount *mp)
1227{
1228	struct vnode *vp, *mvp;
1229
1230	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1231		if ((vp->v_vflag & VV_ROOT) == 0 && vp->v_type != VNON &&
1232		    vp->v_usecount != 0) {
1233			VI_UNLOCK(vp);
1234			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1235			return (EBUSY);
1236		}
1237		VI_UNLOCK(vp);
1238	}
1239
1240	return (0);
1241}
1242
1243static void
1244dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
1245{
1246
1247	mtx_assert(MNT_MTX(mp), MA_OWNED);
1248	mp->mnt_kern_flag &= ~mntkflags;
1249	if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) {
1250		mp->mnt_kern_flag &= ~MNTK_MWAIT;
1251		wakeup(mp);
1252	}
1253	MNT_IUNLOCK(mp);
1254	if (coveredvp != NULL) {
1255		VOP_UNLOCK(coveredvp, 0);
1256		vdrop(coveredvp);
1257	}
1258	vn_finished_write(mp);
1259}
1260
1261/*
1262 * Do the actual filesystem unmount.
1263 */
1264int
1265dounmount(struct mount *mp, int flags, struct thread *td)
1266{
1267	struct vnode *coveredvp, *fsrootvp;
1268	int error;
1269	uint64_t async_flag;
1270	int mnt_gen_r;
1271
1272	if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
1273		mnt_gen_r = mp->mnt_gen;
1274		VI_LOCK(coveredvp);
1275		vholdl(coveredvp);
1276		vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
1277		/*
1278		 * Check for mp being unmounted while waiting for the
1279		 * covered vnode lock.
1280		 */
1281		if (coveredvp->v_mountedhere != mp ||
1282		    coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
1283			VOP_UNLOCK(coveredvp, 0);
1284			vdrop(coveredvp);
1285			vfs_rel(mp);
1286			return (EBUSY);
1287		}
1288	}
1289
1290	/*
1291	 * Only privileged root, or (if MNT_USER is set) the user that did the
1292	 * original mount is permitted to unmount this filesystem.
1293	 */
1294	error = vfs_suser(mp, td);
1295	if (error != 0) {
1296		if (coveredvp != NULL) {
1297			VOP_UNLOCK(coveredvp, 0);
1298			vdrop(coveredvp);
1299		}
1300		vfs_rel(mp);
1301		return (error);
1302	}
1303
1304	vn_start_write(NULL, &mp, V_WAIT | V_MNTREF);
1305	MNT_ILOCK(mp);
1306	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
1307	    (mp->mnt_flag & MNT_UPDATE) != 0 ||
1308	    !TAILQ_EMPTY(&mp->mnt_uppers)) {
1309		dounmount_cleanup(mp, coveredvp, 0);
1310		return (EBUSY);
1311	}
1312	mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_NOINSMNTQ;
1313	if (flags & MNT_NONBUSY) {
1314		MNT_IUNLOCK(mp);
1315		error = vfs_check_usecounts(mp);
1316		MNT_ILOCK(mp);
1317		if (error != 0) {
1318			dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT |
1319			    MNTK_NOINSMNTQ);
1320			return (error);
1321		}
1322	}
1323	/* Allow filesystems to detect that a forced unmount is in progress. */
1324	if (flags & MNT_FORCE) {
1325		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
1326		MNT_IUNLOCK(mp);
1327		/*
1328		 * Must be done after setting MNTK_UNMOUNTF and before
1329		 * waiting for mnt_lockref to become 0.
1330		 */
1331		VFS_PURGE(mp);
1332		MNT_ILOCK(mp);
1333	}
1334	error = 0;
1335	if (mp->mnt_lockref) {
1336		mp->mnt_kern_flag |= MNTK_DRAINING;
1337		error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
1338		    "mount drain", 0);
1339	}
1340	MNT_IUNLOCK(mp);
1341	KASSERT(mp->mnt_lockref == 0,
1342	    ("%s: invalid lock refcount in the drain path @ %s:%d",
1343	    __func__, __FILE__, __LINE__));
1344	KASSERT(error == 0,
1345	    ("%s: invalid return value for msleep in the drain path @ %s:%d",
1346	    __func__, __FILE__, __LINE__));
1347
1348	if (mp->mnt_flag & MNT_EXPUBLIC)
1349		vfs_setpublicfs(NULL, NULL, NULL);
1350
1351	/*
	 * From now on, we can claim that the use reference on the
	 * coveredvp is ours, and the ref can be released only by a
	 * successful unmount by us, or left for a later unmount
1355	 * attempt.  The previously acquired hold reference is no
1356	 * longer needed to protect the vnode from reuse.
1357	 */
1358	if (coveredvp != NULL)
1359		vdrop(coveredvp);
1360
1361	vfs_msync(mp, MNT_WAIT);
1362	MNT_ILOCK(mp);
1363	async_flag = mp->mnt_flag & MNT_ASYNC;
1364	mp->mnt_flag &= ~MNT_ASYNC;
1365	mp->mnt_kern_flag &= ~MNTK_ASYNC;
1366	MNT_IUNLOCK(mp);
1367	cache_purgevfs(mp, false); /* remove cache entries for this file sys */
1368	vfs_deallocate_syncvnode(mp);
1369	/*
1370	 * For forced unmounts, move process cdir/rdir refs on the fs root
1371	 * vnode to the covered vnode.  For non-forced unmounts we want
1372	 * such references to cause an EBUSY error.
1373	 */
1374	if ((flags & MNT_FORCE) &&
1375	    VFS_ROOT(mp, LK_EXCLUSIVE, &fsrootvp) == 0) {
1376		if (mp->mnt_vnodecovered != NULL &&
1377		    (mp->mnt_flag & MNT_IGNORE) == 0)
1378			mountcheckdirs(fsrootvp, mp->mnt_vnodecovered);
1379		if (fsrootvp == rootvnode) {
1380			vrele(rootvnode);
1381			rootvnode = NULL;
1382		}
1383		vput(fsrootvp);
1384	}
1385	if ((mp->mnt_flag & MNT_RDONLY) != 0 || (flags & MNT_FORCE) != 0 ||
1386	    (error = VFS_SYNC(mp, MNT_WAIT)) == 0)
1387		error = VFS_UNMOUNT(mp, flags);
1388	vn_finished_write(mp);
1389	/*
1390	 * If we failed to flush the dirty blocks for this mount point,
	 * undo all the cdir/rdir and rootvnode changes we made above,
	 * unless we failed because the device is reporting that it
	 * doesn't exist anymore.
1394	 */
1395	if (error && error != ENXIO) {
1396		if ((flags & MNT_FORCE) &&
1397		    VFS_ROOT(mp, LK_EXCLUSIVE, &fsrootvp) == 0) {
1398			if (mp->mnt_vnodecovered != NULL &&
1399			    (mp->mnt_flag & MNT_IGNORE) == 0)
1400				mountcheckdirs(mp->mnt_vnodecovered, fsrootvp);
1401			if (rootvnode == NULL) {
1402				rootvnode = fsrootvp;
1403				vref(rootvnode);
1404			}
1405			vput(fsrootvp);
1406		}
1407		MNT_ILOCK(mp);
1408		mp->mnt_kern_flag &= ~MNTK_NOINSMNTQ;
1409		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
1410			MNT_IUNLOCK(mp);
1411			vfs_allocate_syncvnode(mp);
1412			MNT_ILOCK(mp);
1413		}
1414		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
1415		mp->mnt_flag |= async_flag;
1416		if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
1417		    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
1418			mp->mnt_kern_flag |= MNTK_ASYNC;
1419		if (mp->mnt_kern_flag & MNTK_MWAIT) {
1420			mp->mnt_kern_flag &= ~MNTK_MWAIT;
1421			wakeup(mp);
1422		}
1423		MNT_IUNLOCK(mp);
1424		if (coveredvp)
1425			VOP_UNLOCK(coveredvp, 0);
1426		return (error);
1427	}
1428	mtx_lock(&mountlist_mtx);
1429	TAILQ_REMOVE(&mountlist, mp, mnt_list);
1430	mtx_unlock(&mountlist_mtx);
1431	EVENTHANDLER_INVOKE(vfs_unmounted, mp, td);
1432	if (coveredvp != NULL) {
1433		coveredvp->v_mountedhere = NULL;
1434		VOP_UNLOCK(coveredvp, 0);
1435	}
1436	vfs_event_signal(NULL, VQ_UNMOUNT, 0);
1437	if (mp == rootdevmp)
1438		rootdevmp = NULL;
1439	vfs_mount_destroy(mp);
1440	return (0);
1441}
1442
1443/*
1444 * Report errors during filesystem mounting.
1445 */
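/*
 * A filesystem's VFS_MOUNT() routine can use this to hand a readable
 * message back through the "errmsg" option that nmount(2) callers such
 * as mount(8) provide; a sketch of such a call (hypothetical message):
 *
 *	vfs_mount_error(mp, "unknown option: %s", opt->name);
 */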
1446void
1447vfs_mount_error(struct mount *mp, const char *fmt, ...)
1448{
1449	struct vfsoptlist *moptlist = mp->mnt_optnew;
1450	va_list ap;
1451	int error, len;
1452	char *errmsg;
1453
1454	error = vfs_getopt(moptlist, "errmsg", (void **)&errmsg, &len);
1455	if (error || errmsg == NULL || len <= 0)
1456		return;
1457
1458	va_start(ap, fmt);
1459	vsnprintf(errmsg, (size_t)len, fmt, ap);
1460	va_end(ap);
1461}
1462
1463void
1464vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...)
1465{
1466	va_list ap;
1467	int error, len;
1468	char *errmsg;
1469
1470	error = vfs_getopt(opts, "errmsg", (void **)&errmsg, &len);
1471	if (error || errmsg == NULL || len <= 0)
1472		return;
1473
1474	va_start(ap, fmt);
1475	vsnprintf(errmsg, (size_t)len, fmt, ap);
1476	va_end(ap);
1477}
1478
1479/*
1480 * ---------------------------------------------------------------------
1481 * Functions for querying mount options/arguments from filesystems.
1482 */
1483
1484/*
1485 * Check that no unknown options are given
1486 */
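/*
 * A typical caller lists the option names it understands and rejects
 * everything else; a minimal sketch for a hypothetical "myfs":
 *
 *	static const char *myfs_opts[] = {
 *		"from", "export", "acls", NULL
 *	};
 *
 *	error = vfs_filteropt(mp->mnt_optnew, myfs_opts);
 *	if (error != 0)
 *		return (error);
 */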
1487int
1488vfs_filteropt(struct vfsoptlist *opts, const char **legal)
1489{
1490	struct vfsopt *opt;
1491	char errmsg[255];
1492	const char **t, *p, *q;
1493	int ret = 0;
1494
1495	TAILQ_FOREACH(opt, opts, link) {
1496		p = opt->name;
1497		q = NULL;
1498		if (p[0] == 'n' && p[1] == 'o')
1499			q = p + 2;
		for (t = global_opts; *t != NULL; t++) {
1501			if (strcmp(*t, p) == 0)
1502				break;
1503			if (q != NULL) {
1504				if (strcmp(*t, q) == 0)
1505					break;
1506			}
1507		}
1508		if (*t != NULL)
1509			continue;
		for (t = legal; *t != NULL; t++) {
1511			if (strcmp(*t, p) == 0)
1512				break;
1513			if (q != NULL) {
1514				if (strcmp(*t, q) == 0)
1515					break;
1516			}
1517		}
1518		if (*t != NULL)
1519			continue;
1520		snprintf(errmsg, sizeof(errmsg),
1521		    "mount option <%s> is unknown", p);
1522		ret = EINVAL;
1523	}
1524	if (ret != 0) {
1525		TAILQ_FOREACH(opt, opts, link) {
1526			if (strcmp(opt->name, "errmsg") == 0) {
1527				strncpy((char *)opt->value, errmsg, opt->len);
1528				break;
1529			}
1530		}
1531		if (opt == NULL)
1532			printf("%s\n", errmsg);
1533	}
1534	return (ret);
1535}
1536
1537/*
1538 * Get a mount option by its name.
1539 *
1540 * Return 0 if the option was found, ENOENT otherwise.
 * If len is non-NULL, it will be filled with the length
1542 * of the option. If buf is non-NULL, it will be filled
1543 * with the address of the option.
1544 */
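/*
 * For example, a mount routine could look up the "from" option (the
 * name mount(8) conventionally uses for the mount source) like this:
 *
 *	char *from;
 *	int len;
 *
 *	error = vfs_getopt(mp->mnt_optnew, "from", (void **)&from, &len);
 *	if (error != 0)
 *		return (error);
 */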
1545int
1546vfs_getopt(opts, name, buf, len)
1547	struct vfsoptlist *opts;
1548	const char *name;
1549	void **buf;
1550	int *len;
1551{
1552	struct vfsopt *opt;
1553
1554	KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));
1555
1556	TAILQ_FOREACH(opt, opts, link) {
1557		if (strcmp(name, opt->name) == 0) {
1558			opt->seen = 1;
1559			if (len != NULL)
1560				*len = opt->len;
1561			if (buf != NULL)
1562				*buf = opt->value;
1563			return (0);
1564		}
1565	}
1566	return (ENOENT);
1567}
1568
1569int
1570vfs_getopt_pos(struct vfsoptlist *opts, const char *name)
1571{
1572	struct vfsopt *opt;
1573
1574	if (opts == NULL)
1575		return (-1);
1576
1577	TAILQ_FOREACH(opt, opts, link) {
1578		if (strcmp(name, opt->name) == 0) {
1579			opt->seen = 1;
1580			return (opt->pos);
1581		}
1582	}
1583	return (-1);
1584}
1585
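/*
 * Get a size-valued option: a non-negative number with an optional
 * single 'k'/'K', 'm'/'M', 'g'/'G' or 't'/'T' suffix, each step scaling
 * by 1024 (so "512k" yields 524288).  Returns 0 on success, ENOENT if
 * the option is absent and EINVAL if the value does not parse.
 */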
1586int
1587vfs_getopt_size(struct vfsoptlist *opts, const char *name, off_t *value)
1588{
1589	char *opt_value, *vtp;
1590	quad_t iv;
1591	int error, opt_len;
1592
1593	error = vfs_getopt(opts, name, (void **)&opt_value, &opt_len);
1594	if (error != 0)
1595		return (error);
1596	if (opt_len == 0 || opt_value == NULL)
1597		return (EINVAL);
1598	if (opt_value[0] == '\0' || opt_value[opt_len - 1] != '\0')
1599		return (EINVAL);
1600	iv = strtoq(opt_value, &vtp, 0);
1601	if (vtp == opt_value || (vtp[0] != '\0' && vtp[1] != '\0'))
1602		return (EINVAL);
1603	if (iv < 0)
1604		return (EINVAL);
1605	switch (vtp[0]) {
	case 't':
	case 'T':
		iv *= 1024;
		/* FALLTHROUGH */
	case 'g':
	case 'G':
		iv *= 1024;
		/* FALLTHROUGH */
	case 'm':
	case 'M':
		iv *= 1024;
		/* FALLTHROUGH */
	case 'k':
	case 'K':
		iv *= 1024;
		/* FALLTHROUGH */
1618	case '\0':
1619		break;
1620	default:
1621		return (EINVAL);
1622	}
1623	*value = iv;
1624
1625	return (0);
1626}
1627
1628char *
1629vfs_getopts(struct vfsoptlist *opts, const char *name, int *error)
1630{
1631	struct vfsopt *opt;
1632
1633	*error = 0;
1634	TAILQ_FOREACH(opt, opts, link) {
1635		if (strcmp(name, opt->name) != 0)
1636			continue;
1637		opt->seen = 1;
1638		if (opt->len == 0 ||
1639		    ((char *)opt->value)[opt->len - 1] != '\0') {
1640			*error = EINVAL;
1641			return (NULL);
1642		}
1643		return (opt->value);
1644	}
1645	*error = ENOENT;
1646	return (NULL);
1647}
1648
1649int
1650vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w,
1651	uint64_t val)
1652{
1653	struct vfsopt *opt;
1654
1655	TAILQ_FOREACH(opt, opts, link) {
1656		if (strcmp(name, opt->name) == 0) {
1657			opt->seen = 1;
1658			if (w != NULL)
1659				*w |= val;
1660			return (1);
1661		}
1662	}
1663	if (w != NULL)
1664		*w &= ~val;
1665	return (0);
1666}
1667
1668int
1669vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...)
1670{
1671	va_list ap;
1672	struct vfsopt *opt;
1673	int ret;
1674
	KASSERT(opts != NULL, ("vfs_scanopt: caller passed 'opts' as NULL"));
1676
1677	TAILQ_FOREACH(opt, opts, link) {
1678		if (strcmp(name, opt->name) != 0)
1679			continue;
1680		opt->seen = 1;
1681		if (opt->len == 0 || opt->value == NULL)
1682			return (0);
1683		if (((char *)opt->value)[opt->len - 1] != '\0')
1684			return (0);
1685		va_start(ap, fmt);
1686		ret = vsscanf(opt->value, fmt, ap);
1687		va_end(ap);
1688		return (ret);
1689	}
1690	return (0);
1691}
1692
1693int
1694vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len)
1695{
1696	struct vfsopt *opt;
1697
1698	TAILQ_FOREACH(opt, opts, link) {
1699		if (strcmp(name, opt->name) != 0)
1700			continue;
1701		opt->seen = 1;
1702		if (opt->value == NULL)
1703			opt->len = len;
1704		else {
1705			if (opt->len != len)
1706				return (EINVAL);
1707			bcopy(value, opt->value, len);
1708		}
1709		return (0);
1710	}
1711	return (ENOENT);
1712}
1713
1714int
1715vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len)
1716{
1717	struct vfsopt *opt;
1718
1719	TAILQ_FOREACH(opt, opts, link) {
1720		if (strcmp(name, opt->name) != 0)
1721			continue;
1722		opt->seen = 1;
1723		if (opt->value == NULL)
1724			opt->len = len;
1725		else {
1726			if (opt->len < len)
1727				return (EINVAL);
1728			opt->len = len;
1729			bcopy(value, opt->value, len);
1730		}
1731		return (0);
1732	}
1733	return (ENOENT);
1734}
1735
1736int
1737vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value)
1738{
1739	struct vfsopt *opt;
1740
1741	TAILQ_FOREACH(opt, opts, link) {
1742		if (strcmp(name, opt->name) != 0)
1743			continue;
1744		opt->seen = 1;
1745		if (opt->value == NULL)
1746			opt->len = strlen(value) + 1;
1747		else if (strlcpy(opt->value, value, opt->len) >= opt->len)
1748			return (EINVAL);
1749		return (0);
1750	}
1751	return (ENOENT);
1752}
1753
1754/*
1755 * Find and copy a mount option.
1756 *
 * The size of the buffer has to be specified
 * in len; if it is not the same length as the
 * mount option, EINVAL is returned.
1760 * Returns ENOENT if the option is not found.
1761 */
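/*
 * For example, vfs_domount_update() above uses this to copy the
 * "export" option into a fixed-size structure:
 *
 *	struct export_args export;
 *
 *	error = vfs_copyopt(*optlist, "export", &export, sizeof(export));
 */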
1762int
1763vfs_copyopt(opts, name, dest, len)
1764	struct vfsoptlist *opts;
1765	const char *name;
1766	void *dest;
1767	int len;
1768{
1769	struct vfsopt *opt;
1770
1771	KASSERT(opts != NULL, ("vfs_copyopt: caller passed 'opts' as NULL"));
1772
1773	TAILQ_FOREACH(opt, opts, link) {
1774		if (strcmp(name, opt->name) == 0) {
1775			opt->seen = 1;
1776			if (len != opt->len)
1777				return (EINVAL);
1778			bcopy(opt->value, dest, opt->len);
1779			return (0);
1780		}
1781	}
1782	return (ENOENT);
1783}
1784
1785int
1786__vfs_statfs(struct mount *mp, struct statfs *sbp)
1787{
1788	int error;
1789
1790	error = mp->mnt_op->vfs_statfs(mp, &mp->mnt_stat);
1791	if (sbp != &mp->mnt_stat)
1792		*sbp = mp->mnt_stat;
1793	return (error);
1794}
1795
1796void
1797vfs_mountedfrom(struct mount *mp, const char *from)
1798{
1799
1800	bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname);
1801	strlcpy(mp->mnt_stat.f_mntfromname, from,
1802	    sizeof mp->mnt_stat.f_mntfromname);
1803}
1804
1805/*
1806 * ---------------------------------------------------------------------
 * This is the API for building mount args and mounting filesystems from
 * inside the kernel.
 *
 * The API works by accumulation of individual args.  The first error is
 * latched.
 *
 * XXX: should be documented in a new kernel_mount(9) manual page
1814 */
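/*
 * For illustration, an in-kernel caller can mount a filesystem with the
 * printf-like helper; the device and mount point here are hypothetical:
 *
 *	error = kernel_vmount(MNT_RDONLY,
 *	    "fstype", "cd9660",
 *	    "fspath", "/mnt",
 *	    "from", "/dev/cd0",
 *	    NULL);
 */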
1815
1816/* A memory allocation which must be freed when we are done */
1817struct mntaarg {
1818	SLIST_ENTRY(mntaarg)	next;
1819};
1820
1821/* The header for the mount arguments */
1822struct mntarg {
1823	struct iovec *v;
1824	int len;
1825	int error;
1826	SLIST_HEAD(, mntaarg)	list;
1827};
1828
1829/*
1830 * Add a boolean argument.
1831 *
1832 * flag is the boolean value.
1833 * name must start with "no".
1834 */
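/*
 * For example, sys_mount() above passes the read-only bit as:
 *
 *	ma = mount_argb(ma, flags & MNT_RDONLY, "noro");
 *
 * which records the option "ro" when the flag is set and "noro"
 * otherwise.
 */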
1835struct mntarg *
1836mount_argb(struct mntarg *ma, int flag, const char *name)
1837{
1838
1839	KASSERT(name[0] == 'n' && name[1] == 'o',
1840	    ("mount_argb(...,%s): name must start with 'no'", name));
1841
1842	return (mount_arg(ma, name + (flag ? 2 : 0), NULL, 0));
1843}
1844
1845/*
1846 * Add an argument printf style
1847 */
1848struct mntarg *
1849mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...)
1850{
1851	va_list ap;
1852	struct mntaarg *maa;
1853	struct sbuf *sb;
1854	int len;
1855
1856	if (ma == NULL) {
1857		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1858		SLIST_INIT(&ma->list);
1859	}
1860	if (ma->error)
1861		return (ma);
1862
1863	ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
1864	    M_MOUNT, M_WAITOK);
1865	ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
1866	ma->v[ma->len].iov_len = strlen(name) + 1;
1867	ma->len++;
1868
1869	sb = sbuf_new_auto();
1870	va_start(ap, fmt);
1871	sbuf_vprintf(sb, fmt, ap);
1872	va_end(ap);
1873	sbuf_finish(sb);
1874	len = sbuf_len(sb) + 1;
1875	maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
1876	SLIST_INSERT_HEAD(&ma->list, maa, next);
1877	bcopy(sbuf_data(sb), maa + 1, len);
1878	sbuf_delete(sb);
1879
1880	ma->v[ma->len].iov_base = maa + 1;
1881	ma->v[ma->len].iov_len = len;
1882	ma->len++;
1883
1884	return (ma);
1885}
1886
1887/*
1888 * Add an argument which is a userland string.
1889 */
1890struct mntarg *
1891mount_argsu(struct mntarg *ma, const char *name, const void *val, int len)
1892{
1893	struct mntaarg *maa;
1894	char *tbuf;
1895
1896	if (val == NULL)
1897		return (ma);
1898	if (ma == NULL) {
1899		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1900		SLIST_INIT(&ma->list);
1901	}
1902	if (ma->error)
1903		return (ma);
1904	maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
1905	SLIST_INSERT_HEAD(&ma->list, maa, next);
1906	tbuf = (void *)(maa + 1);
1907	ma->error = copyinstr(val, tbuf, len, NULL);
1908	return (mount_arg(ma, name, tbuf, -1));
1909}
1910
1911/*
1912 * Plain argument.
1913 *
1914 * If length is -1, treat value as a C string.
1915 */
1916struct mntarg *
1917mount_arg(struct mntarg *ma, const char *name, const void *val, int len)
1918{
1919
1920	if (ma == NULL) {
1921		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
1922		SLIST_INIT(&ma->list);
1923	}
1924	if (ma->error)
1925		return (ma);
1926
1927	ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
1928	    M_MOUNT, M_WAITOK);
1929	ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
1930	ma->v[ma->len].iov_len = strlen(name) + 1;
1931	ma->len++;
1932
1933	ma->v[ma->len].iov_base = (void *)(uintptr_t)val;
1934	if (len < 0)
1935		ma->v[ma->len].iov_len = strlen(val) + 1;
1936	else
1937		ma->v[ma->len].iov_len = len;
1938	ma->len++;
1939	return (ma);
1940}
1941
1942/*
1943 * Free a mntarg structure
1944 */
1945static void
1946free_mntarg(struct mntarg *ma)
1947{
1948	struct mntaarg *maa;
1949
1950	while (!SLIST_EMPTY(&ma->list)) {
1951		maa = SLIST_FIRST(&ma->list);
1952		SLIST_REMOVE_HEAD(&ma->list, next);
1953		free(maa, M_MOUNT);
1954	}
1955	free(ma->v, M_MOUNT);
1956	free(ma, M_MOUNT);
1957}
1958
1959/*
1960 * Mount a filesystem
1961 */
1962int
1963kernel_mount(struct mntarg *ma, uint64_t flags)
1964{
1965	struct uio auio;
1966	int error;
1967
1968	KASSERT(ma != NULL, ("kernel_mount NULL ma"));
1969	KASSERT(ma->v != NULL, ("kernel_mount NULL ma->v"));
1970	KASSERT(!(ma->len & 1), ("kernel_mount odd ma->len (%d)", ma->len));
1971
1972	auio.uio_iov = ma->v;
1973	auio.uio_iovcnt = ma->len;
1974	auio.uio_segflg = UIO_SYSSPACE;
1975
1976	error = ma->error;
1977	if (!error)
1978		error = vfs_donmount(curthread, flags, &auio);
1979	free_mntarg(ma);
1980	return (error);
1981}
1982
1983/*
1984 * A printflike function to mount a filesystem.
1985 */
1986int
1987kernel_vmount(int flags, ...)
1988{
1989	struct mntarg *ma = NULL;
1990	va_list ap;
1991	const char *cp;
1992	const void *vp;
1993	int error;
1994
1995	va_start(ap, flags);
1996	for (;;) {
1997		cp = va_arg(ap, const char *);
1998		if (cp == NULL)
1999			break;
2000		vp = va_arg(ap, const void *);
2001		ma = mount_arg(ma, cp, vp, (vp != NULL ? -1 : 0));
2002	}
2003	va_end(ap);
2004
2005	error = kernel_mount(ma, flags);
2006	return (error);
2007}
2008
2009void
2010vfs_oexport_conv(const struct oexport_args *oexp, struct export_args *exp)
2011{
2012
2013	bcopy(oexp, exp, sizeof(*oexp));
2014	exp->ex_numsecflavors = 0;
2015}
2016