/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 2005, 2006, 2012 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
 * Copyright (c) 2006, 2012 Daichi Goto <daichi@freebsd.org>
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/stat.h>

#include <fs/unionfs/union.h>

static MALLOC_DEFINE(M_UNIONFSMNT, "UNIONFS mount", "UNIONFS mount structure");

static vfs_fhtovp_t	unionfs_fhtovp;
static vfs_checkexp_t	unionfs_checkexp;
static vfs_mount_t	unionfs_domount;
static vfs_quotactl_t	unionfs_quotactl;
static vfs_root_t	unionfs_root;
static vfs_sync_t	unionfs_sync;
static vfs_statfs_t	unionfs_statfs;
static vfs_unmount_t	unionfs_unmount;
static vfs_vget_t	unionfs_vget;
static vfs_extattrctl_t	unionfs_extattrctl;

static struct vfsops unionfs_vfsops;

/*
 * Mount unionfs layer.
 */
static int
unionfs_domount(struct mount *mp)
{
	struct vnode   *lowerrootvp;
	struct vnode   *upperrootvp;
	struct unionfs_mount *ump;
	char           *target;
	char           *tmp;
	char           *ep;
	struct nameidata nd, *ndp;
	struct vattr	va;
	unionfs_copymode copymode;
	unionfs_whitemode whitemode;
	int		below;
	int		error;
	int		len;
	uid_t		uid;
	gid_t		gid;
	u_short		udir;
	u_short		ufile;

	UNIONFSDEBUG("unionfs_mount(mp = %p)\n", mp);

	error = 0;
	below = 0;
	uid = 0;
	gid = 0;
	udir = 0;
	ufile = 0;
	copymode = UNIONFS_TRANSPARENT;	/* default */
	whitemode = UNIONFS_WHITE_ALWAYS;
	ndp = &nd;

	if (mp->mnt_flag & MNT_ROOTFS) {
		vfs_mount_error(mp, "Cannot union mount root filesystem");
		return (EOPNOTSUPP);
	}

	/*
	 * Mount updates are not supported.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		vfs_mount_error(mp, "unionfs does not support mount update");
		return (EOPNOTSUPP);
	}

	/*
	 * Get argument
	 */
	error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
	if (error)
		error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target,
		    &len);
	if (error || target[len - 1] != '\0') {
		vfs_mount_error(mp, "Invalid target");
		return (EINVAL);
	}
	if (vfs_getopt(mp->mnt_optnew, "below", NULL, NULL) == 0)
		below = 1;
	if (vfs_getopt(mp->mnt_optnew, "udir", (void **)&tmp, NULL) == 0) {
		if (tmp != NULL)
			udir = (mode_t)strtol(tmp, &ep, 8);
		if (tmp == NULL || *ep) {
			vfs_mount_error(mp, "Invalid udir");
			return (EINVAL);
		}
		udir &= S_IRWXU | S_IRWXG | S_IRWXO;
	}
	if (vfs_getopt(mp->mnt_optnew, "ufile", (void **)&tmp, NULL) == 0) {
		if (tmp != NULL)
			ufile = (mode_t)strtol(tmp, &ep, 8);
		if (tmp == NULL || *ep) {
			vfs_mount_error(mp, "Invalid ufile");
			return (EINVAL);
		}
		ufile &= S_IRWXU | S_IRWXG | S_IRWXO;
	}
	/* check umask, uid and gid */
	if (udir == 0 && ufile != 0)
		udir = ufile;
	if (ufile == 0 && udir != 0)
		ufile = udir;

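	/*
	 * If no modes were specified, default the union directory and file
	 * modes to the mode of the covered directory; its owner and group
	 * likewise provide the default uid and gid.
	 */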
	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	if (!error) {
		if (udir == 0)
			udir = va.va_mode;
		if (ufile == 0)
			ufile = va.va_mode;
		uid = va.va_uid;
		gid = va.va_gid;
	}
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

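	/*
	 * The uid, gid, copymode and whiteout options may only be set when
	 * the mount is requested by the superuser.
	 */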
	if (mp->mnt_cred->cr_ruid == 0) {	/* root only */
		if (vfs_getopt(mp->mnt_optnew, "uid", (void **)&tmp,
		    NULL) == 0) {
			if (tmp != NULL)
				uid = (uid_t)strtol(tmp, &ep, 10);
			if (tmp == NULL || *ep) {
				vfs_mount_error(mp, "Invalid uid");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "gid", (void **)&tmp,
		    NULL) == 0) {
			if (tmp != NULL)
				gid = (gid_t)strtol(tmp, &ep, 10);
			if (tmp == NULL || *ep) {
				vfs_mount_error(mp, "Invalid gid");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "copymode", (void **)&tmp,
		    NULL) == 0) {
			if (tmp == NULL) {
				vfs_mount_error(mp, "Invalid copymode");
				return (EINVAL);
			} else if (strcasecmp(tmp, "traditional") == 0)
				copymode = UNIONFS_TRADITIONAL;
			else if (strcasecmp(tmp, "transparent") == 0)
				copymode = UNIONFS_TRANSPARENT;
			else if (strcasecmp(tmp, "masquerade") == 0)
				copymode = UNIONFS_MASQUERADE;
			else {
				vfs_mount_error(mp, "Invalid copymode");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "whiteout", (void **)&tmp,
		    NULL) == 0) {
			if (tmp == NULL) {
				vfs_mount_error(mp, "Invalid whiteout mode");
				return (EINVAL);
			} else if (strcasecmp(tmp, "always") == 0)
				whitemode = UNIONFS_WHITE_ALWAYS;
			else if (strcasecmp(tmp, "whenneeded") == 0)
				whitemode = UNIONFS_WHITE_WHENNEEDED;
			else {
				vfs_mount_error(mp, "Invalid whiteout mode");
				return (EINVAL);
			}
		}
	}
	/* With UNIONFS_TRADITIONAL copymode, uid/gid are those of the mounting user. */
	if (copymode == UNIONFS_TRADITIONAL) {
		uid = mp->mnt_cred->cr_ruid;
		gid = mp->mnt_cred->cr_rgid;
	}

	UNIONFSDEBUG("unionfs_mount: uid=%d, gid=%d\n", uid, gid);
	UNIONFSDEBUG("unionfs_mount: udir=0%03o, ufile=0%03o\n", udir, ufile);
	UNIONFSDEBUG("unionfs_mount: copymode=%d\n", copymode);
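
	/*
	 * All mount options have been parsed at this point.  As a rough,
	 * illustrative example (not a reference for the exact userland
	 * syntax), a request along the lines of
	 *
	 *	mount -t unionfs -o copymode=transparent /some/dir1 /some/dir2
	 *
	 * arrives here with "target"/"from" naming /some/dir1, which is
	 * attached above the covered directory /some/dir2 (or below it when
	 * the "below" option is given).
	 */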

	/*
	 * Find upper node
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, target);
	if ((error = namei(ndp)))
		return (error);

	NDFREE_PNBUF(ndp);

	/* get root vnodes */
	lowerrootvp = mp->mnt_vnodecovered;
	upperrootvp = ndp->ni_vp;
	KASSERT(lowerrootvp != NULL, ("%s: NULL lower root vp", __func__));
	KASSERT(upperrootvp != NULL, ("%s: NULL upper root vp", __func__));

	/* create unionfs_mount */
	ump = malloc(sizeof(struct unionfs_mount), M_UNIONFSMNT,
	    M_WAITOK | M_ZERO);

	/*
	 * Save references to the underlying vnodes.  With the "below"
	 * option the roles are swapped: the covered vnode becomes the
	 * upper layer and the looked-up target becomes the lower layer.
	 */
	if (below) {
		VOP_UNLOCK(upperrootvp);
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY);
		ump->um_lowervp = upperrootvp;
		ump->um_uppervp = lowerrootvp;
	} else {
		ump->um_lowervp = lowerrootvp;
		ump->um_uppervp = upperrootvp;
	}
	ump->um_rootvp = NULLVP;
	ump->um_uid = uid;
	ump->um_gid = gid;
	ump->um_udir = udir;
	ump->um_ufile = ufile;
	ump->um_copymode = copymode;
	ump->um_whitemode = whitemode;

	mp->mnt_data = ump;

	/*
	 * Copy upper layer's RDONLY flag.
	 */
	mp->mnt_flag |= ump->um_uppervp->v_mount->mnt_flag & MNT_RDONLY;

	/*
	 * Unlock the node
	 */
	VOP_UNLOCK(ump->um_uppervp);

	/*
	 * Get the unionfs root vnode.
	 */
	error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp,
	    NULLVP, &(ump->um_rootvp), NULL);
	if (error != 0) {
		vrele(upperrootvp);
		free(ump, M_UNIONFSMNT);
		mp->mnt_data = NULL;
		return (error);
	}
	KASSERT(ump->um_rootvp != NULL, ("rootvp cannot be NULL"));
	KASSERT((ump->um_rootvp->v_vflag & VV_ROOT) != 0,
	    ("%s: rootvp without VV_ROOT", __func__));

	/*
	 * Do not release the namei() reference on upperrootvp until after
	 * we attempt to register the upper mounts.  A concurrent unmount
	 * of the upper or lower FS may have caused unionfs_nodeget() to
	 * create a unionfs node with a NULL upper or lower vp and with
	 * no reference held on upperrootvp or lowerrootvp.
	 * vfs_register_upper() should subsequently fail, which is what
	 * we want, but we must ensure neither underlying vnode can be
	 * reused until that happens.  We assume the caller holds a reference
	 * to lowerrootvp as it is the mount's covered vnode.
	 */
	ump->um_lowermp = vfs_register_upper_from_vp(ump->um_lowervp, mp,
	    &ump->um_lower_link);
	ump->um_uppermp = vfs_register_upper_from_vp(ump->um_uppervp, mp,
	    &ump->um_upper_link);

	vrele(upperrootvp);

	if (ump->um_lowermp == NULL || ump->um_uppermp == NULL) {
		if (ump->um_lowermp != NULL)
			vfs_unregister_upper(ump->um_lowermp, &ump->um_lower_link);
		if (ump->um_uppermp != NULL)
			vfs_unregister_upper(ump->um_uppermp, &ump->um_upper_link);
		vflush(mp, 1, FORCECLOSE, curthread);
		free(ump, M_UNIONFSMNT);
		mp->mnt_data = NULL;
		return (ENOENT);
	}

	/*
	 * Specify that the covered vnode lock should remain held while
	 * lookup() performs the cross-mount walk.  This prevents a lock-order
	 * reversal between the covered vnode lock (which is also locked by
	 * unionfs_lock()) and the mountpoint's busy count.  Without this,
	 * unmount will lock the covered vnode lock (directly through the
	 * covered vnode) and wait for the busy count to drain, while a
	 * concurrent lookup will increment the busy count and then lock
	 * the covered vnode lock (indirectly through unionfs_lock()).
	 *
	 * Note that we can't yet use this facility for the 'below' case
	 * in which the upper vnode is the covered vnode, because that would
	 * introduce a different LOR in which the cross-mount lookup would
	 * effectively hold the upper vnode lock before acquiring the lower
	 * vnode lock, while an unrelated lock operation would still acquire
	 * the lower vnode lock before the upper vnode lock, which is the
	 * order unionfs currently requires.
	 */
	if (!below) {
		vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
		mp->mnt_vnodecovered->v_vflag |= VV_CROSSLOCK;
		VOP_UNLOCK(mp->mnt_vnodecovered);
	}

	MNT_ILOCK(mp);
	if ((ump->um_lowermp->mnt_flag & MNT_LOCAL) != 0 &&
	    (ump->um_uppermp->mnt_flag & MNT_LOCAL) != 0)
		mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_NOMSYNC | MNTK_UNIONFS |
	    (ump->um_uppermp->mnt_kern_flag & MNTK_SHARED_WRITES);
	MNT_IUNLOCK(mp);

	/*
	 * Get new fsid
	 */
	vfs_getnewfsid(mp);

	snprintf(mp->mnt_stat.f_mntfromname, MNAMELEN, "<%s>:%s",
	    below ? "below" : "above", target);

	UNIONFSDEBUG("unionfs_mount: from %s, on %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);

	return (0);
}

/*
 * Free reference to unionfs layer
 */
static int
unionfs_unmount(struct mount *mp, int mntflags)
{
	struct unionfs_mount *ump;
	int		error;
	int		num;
	int		freeing;
	int		flags;

	UNIONFSDEBUG("unionfs_unmount: mp = %p\n", mp);

	ump = MOUNTTOUNIONFSMOUNT(mp);
	flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/*
	 * Flush the unionfs vnodes; keep retrying as long as each pass
	 * manages to reclaim at least one vnode (no need to call vrele).
	 */
	for (freeing = 0; (error = vflush(mp, 1, flags, curthread)) != 0;) {
		num = mp->mnt_nvnodelistsize;
		if (num == freeing)
			break;
		freeing = num;
	}

	if (error)
		return (error);

	vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
	mp->mnt_vnodecovered->v_vflag &= ~VV_CROSSLOCK;
	VOP_UNLOCK(mp->mnt_vnodecovered);
	vfs_unregister_upper(ump->um_lowermp, &ump->um_lower_link);
	vfs_unregister_upper(ump->um_uppermp, &ump->um_upper_link);
	free(ump, M_UNIONFSMNT);
	mp->mnt_data = NULL;

	return (0);
}

static int
unionfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct unionfs_mount *ump;
	struct vnode *vp;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	vp = ump->um_rootvp;

	UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n",
	    vp, VOP_ISLOCKED(vp));

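	/*
	 * The unionfs root vnode was created at mount time and is cached in
	 * the mount structure, so simply return a new reference to it,
	 * locked as requested.
	 */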
	vref(vp);
	if (flags & LK_TYPE_MASK)
		vn_lock(vp, flags);

	*vpp = vp;

	return (0);
}

static int
unionfs_quotactl(struct mount *mp, int cmd, uid_t uid, void *arg,
    bool *mp_busy)
{
	struct mount *uppermp;
	struct unionfs_mount *ump;
	int error;
	bool unbusy;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	/*
	 * Issue a volatile load of um_uppermp here, as the mount may be
	 * torn down after we call vfs_unbusy().
	 */
	uppermp = atomic_load_ptr(&ump->um_uppermp);
	KASSERT(*mp_busy == true, ("upper mount not busy"));
	/*
	 * See comment in sys_quotactl() for an explanation of why the
	 * lower mount needs to be busied by the caller of VFS_QUOTACTL()
	 * but may be unbusied by the implementation.  We must unbusy
	 * the upper mount for the same reason; otherwise a namei lookup
	 * issued by the VFS_QUOTACTL() implementation could traverse the
	 * upper mount and deadlock.
	 */
	vfs_unbusy(mp);
	*mp_busy = false;
	unbusy = true;
	error = vfs_busy(uppermp, 0);
	/*
	 * Writes are always performed on the upper layer, so direct the
	 * quota operation at the upper mount.
	 */
	if (error == 0)
		error = VFS_QUOTACTL(uppermp, cmd, uid, arg, &unbusy);
	if (unbusy)
		vfs_unbusy(uppermp);

	return (error);
}

static int
unionfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct unionfs_mount *ump;
	struct statfs	*mstat;
	uint64_t	lbsize;
	int		error;

	ump = MOUNTTOUNIONFSMOUNT(mp);

	UNIONFSDEBUG("unionfs_statfs(mp = %p, lvp = %p, uvp = %p)\n",
	    mp, ump->um_lowervp, ump->um_uppervp);

	mstat = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK | M_ZERO);

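	/*
	 * Query the lower layer first and remember its block size so that
	 * its block count can later be rescaled into upper-layer units.
	 */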
	error = VFS_STATFS(ump->um_lowermp, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
	sbp->f_blocks = mstat->f_blocks;
	sbp->f_files = mstat->f_files;

	lbsize = mstat->f_bsize;

	error = VFS_STATFS(ump->um_uppermp, mstat);
	if (error) {
		free(mstat, M_STATFS);
		return (error);
	}

	/*
	 * The FS type, flags and block sizes are copied from the upper vfs
	 * (the writable layer takes priority).
	 */
	sbp->f_type = mstat->f_type;
	sbp->f_flags = mstat->f_flags;
	sbp->f_bsize = mstat->f_bsize;
	sbp->f_iosize = mstat->f_iosize;

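	/*
	 * If the two layers use different block sizes, rescale the lower
	 * layer's block count into upper-layer blocks before adding the
	 * upper layer's counts.
	 */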
	if (mstat->f_bsize != lbsize)
		sbp->f_blocks = ((off_t)sbp->f_blocks * lbsize) /
		    mstat->f_bsize;

	sbp->f_blocks += mstat->f_blocks;
	sbp->f_bfree = mstat->f_bfree;
	sbp->f_bavail = mstat->f_bavail;
	sbp->f_files += mstat->f_files;
	sbp->f_ffree = mstat->f_ffree;

	free(mstat, M_STATFS);
	return (0);
}

static int
unionfs_sync(struct mount *mp, int waitfor)
{
	/* nothing to do */
	return (0);
}

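/*
 * Inode-number based lookups and NFS exporting of unionfs mounts are not
 * supported, so vget, fhtovp and checkexp all return EOPNOTSUPP.
 */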
static int
unionfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static int
unionfs_fhtovp(struct mount *mp, struct fid *fidp, int flags,
    struct vnode **vpp)
{
	return (EOPNOTSUPP);
}

static int
unionfs_checkexp(struct mount *mp, struct sockaddr *nam, uint64_t *extflagsp,
    struct ucred **credanonp, int *numsecflavors, int *secflavors)
{
	return (EOPNOTSUPP);
}

static int
unionfs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int namespace, const char *attrname)
{
	struct unionfs_mount *ump;
	struct unionfs_node *unp;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	unp = VTOUNIONFS(filename_vp);

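	/*
	 * Forward the request to whichever layer actually backs this vnode,
	 * preferring the upper layer when it is present.
	 */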
	if (unp->un_uppervp != NULLVP) {
		return (VFS_EXTATTRCTL(ump->um_uppermp, cmd,
		    unp->un_uppervp, namespace, attrname));
	} else {
		return (VFS_EXTATTRCTL(ump->um_lowermp, cmd,
		    unp->un_lowervp, namespace, attrname));
	}
}

static struct vfsops unionfs_vfsops = {
	.vfs_checkexp =		unionfs_checkexp,
	.vfs_extattrctl =	unionfs_extattrctl,
	.vfs_fhtovp =		unionfs_fhtovp,
	.vfs_init =		unionfs_init,
	.vfs_mount =		unionfs_domount,
	.vfs_quotactl =		unionfs_quotactl,
	.vfs_root =		unionfs_root,
	.vfs_statfs =		unionfs_statfs,
	.vfs_sync =		unionfs_sync,
	.vfs_uninit =		unionfs_uninit,
	.vfs_unmount =		unionfs_unmount,
	.vfs_vget =		unionfs_vget,
};

VFS_SET(unionfs_vfsops, unionfs, VFCF_LOOPBACK);