ffs_vfsops.c revision 270694
/*-
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_vfsops.c 270694 2014-08-27 01:37:22Z kib $");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;

static int	ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void	ffs_oldfscompat_read(struct fs *, struct ufsmount *,
		    ufs2_daddr_t);
static void	ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int	ffs_sync_lazy(struct mount *mp);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl =	ffs_extattrctl,
	.vfs_fhtovp =		ffs_fhtovp,
	.vfs_init =		ffs_init,
	.vfs_mount =		ffs_mount,
	.vfs_cmount =		ffs_cmount,
	.vfs_quotactl =		ufs_quotactl,
	.vfs_root =		ufs_root,
	.vfs_statfs =		ffs_statfs,
	.vfs_sync =		ffs_sync,
	.vfs_uninit =		ffs_uninit,
	.vfs_unmount =		ffs_unmount,
	.vfs_vget =		ffs_vget,
	.vfs_susp_clean =	process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);

static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name =	"FFS",
	.bop_write =	ffs_bufwrite,
	.bop_strategy =	ffs_geom_strategy,
	.bop_sync =	bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush =	bufbdflush,
#else
	.bop_bdflush =	ffs_bdflush,
#endif
};
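
/*
 * ffs_bufwrite() and ffs_geom_strategy(), defined later in this file,
 * layer snapshot copy-on-write checks and background-write handling on
 * top of the generic buffer-cache operations named above.
 */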

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", NULL };

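/*
 * Any option not listed in ffs_opts above is rejected by the
 * vfs_filteropt() check at the top of ffs_mount(), which fails the
 * mount request with EINVAL.
 */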
static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	pid_t fsckpid = 0;
	int error, flags;
	uint64_t mntorflags;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
	    vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
		/*
		 * Once we have set the restricted PID, do not
		 * persist "fsckpid" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "fsckpid");
		vfs_deleteopt(mp->mnt_opt, "fsckpid");
		if (mp->mnt_flag & MNT_UPDATE) {
			if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
			     vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
				vfs_mount_error(mp,
				    "Checker enable: Must be read-only");
				return (EINVAL);
			}
		} else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
			vfs_mount_error(mp,
			    "Checker enable: Must be read-only");
			return (EINVAL);
		}
		/* Set to -1 if we are done */
		if (fsckpid == 0)
			fsckpid = -1;
	}
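	/*
	 * To summarize the checker protocol: a positive fsckpid value
	 * grants that process write access to an otherwise read-only
	 * mount, while an explicit 0 (mapped to -1 above) requests that
	 * such access be revoked again.
	 */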

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

	MNT_ILOCK(mp);
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		if (fsckpid == -1 && ump->um_fsckpid > 0) {
			if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
			    (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
				return (error);
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Return to normal read-only mode.
			 */
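			/*
			 * g_access() adjusts the consumer's (read, write,
			 * exclusive) reference counts by the given deltas;
			 * here the write reference taken for the checker
			 * is dropped.
			 */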
			error = g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			ump->um_fsckpid = 0;
		}
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (MOUNTEDSOFTDEP(mp)) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (MOUNTEDSOFTDEP(mp))
				softdep_unmount(mp);
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			PICKUP_GIANT();
			fs->fs_ronly = 1;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, td, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If we are running a checker, do not allow upgrade.
			 */
			if (ump->um_fsckpid > 0) {
				vfs_mount_error(mp,
				    "Active checker, cannot upgrade to write");
				return (EINVAL);
			}
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					   "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					   "R/W mount of %s denied. %s.%s",
					   fs->fs_fsmnt,
					   "Filesystem is not clean - run fsck",
					   (fs->fs_flags & FS_SUJ) == 0 ? "" :
					   " Forced mount will invalidate"
					   " journal contents");
					return (EPERM);
				}
			}
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
				vn_finished_write(mp);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				vn_finished_write(mp);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vn_finished_write(mp);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}
		/*
		 * If this is a request from fsck to clean up the filesystem,
		 * then allow the specified pid to proceed.
		 */
		if (fsckpid > 0) {
			if (ump->um_fsckpid != 0) {
				vfs_mount_error(mp,
				    "Active checker already running on %s",
				    fs->fs_fsmnt);
				return (EINVAL);
			}
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error) {
				vfs_mount_error(mp,
				    "Checker activation failed on %s",
				    fs->fs_fsmnt);
				return (error);
			}
			ump->um_fsckpid = fsckpid;
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			fs->fs_mtime = time_second;
			fs->fs_fmod = 1;
			fs->fs_clean = 0;
			(void) ffs_sbupdate(ump, MNT_WAIT, 0);
		}

		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, fspec));
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	if ((error = namei(&ndp)) != 0)
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;
	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device,
		 * then it's not correct.
		 */

		if (devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
			vrele(devvp);
			return (error);
		}
		if (fsckpid > 0) {
			KASSERT(MOUNTEDSOFTDEP(mp) == 0,
			    ("soft updates enabled on read-only file system"));
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			DROP_GIANT();
			g_topology_lock();
			/*
			 * Request write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error) {
				printf("WARNING: %s: Checker activation "
				    "failed\n", fs->fs_fsmnt);
			} else {
				ump->um_fsckpid = fsckpid;
				if (fs->fs_snapinum[0] != 0)
					ffs_snapshot_mount(mp);
				fs->fs_mtime = time_second;
				fs->fs_clean = 0;
				(void) ffs_sbupdate(ump, MNT_WAIT, 0);
			}
		}
	}
	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	struct export_args exp;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);
	vfs_oexport_conv(&args.export, &exp);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &exp, sizeof(exp));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, struct thread *td, int force)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, size, error;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && force == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp, 0);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
			brelse(bp);
			return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_active = fs->fs_active;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			VOP_UNLOCK(vp, 0);
			vrele(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ffs_load_inode(bp, ip, fs, ip->i_number);
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		VOP_UNLOCK(vp, 0);
		vrele(vp);
	}
	return (0);
}

/*
 * Possible superblock locations ordered from most to least likely.
 */
static int sblock_try[] = SBLOCKSEARCH;
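/*
 * As of this revision, SBLOCKSEARCH in <ufs/ffs/fs.h> expands to
 * { SBLOCK_UFS2, SBLOCK_UFS1, SBLOCK_FLOPPY, SBLOCK_PIGGY, -1 },
 * i.e. byte offsets 65536, 8192, 0 and 262144, terminated by -1.
 */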

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(devvp, mp, td)
	struct vnode *devvp;
	struct mount *mp;
	struct thread *td;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	struct cdev *dev;
	void *space;
	ufs2_daddr_t sblockloc;
	int error, i, blks, size, ronly;
	int32_t *lp;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;

	bp = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error)
		goto out;
	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	devvp->v_bufobj.bo_ops = &ffs_ops;

	fs = NULL;
	sblockloc = 0;
	/*
	 * Try reading the superblock in each of its possible locations.
	 */
	for (i = 0; sblock_try[i] != -1; i++) {
		if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
			error = EINVAL;
			vfs_mount_error(mp,
			    "Invalid sectorsize %d for superblock size %d",
			    cp->provider->sectorsize, SBLOCKSIZE);
			goto out;
		}
		if ((error = bread(devvp, btodb(sblock_try[i]), SBLOCKSIZE,
		    cred, &bp)) != 0)
			goto out;
		fs = (struct fs *)bp->b_data;
		sblockloc = sblock_try[i];
		if ((fs->fs_magic == FS_UFS1_MAGIC ||
		     (fs->fs_magic == FS_UFS2_MAGIC &&
		      (fs->fs_sblockloc == sblockloc ||
		       (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) &&
		    fs->fs_bsize <= MAXBSIZE &&
		    fs->fs_bsize >= sizeof(struct fs))
			break;
		brelse(bp);
		bp = NULL;
	}
	if (sblock_try[i] == -1) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	fs->fs_fmod = 0;
	fs->fs_flags &= ~FS_INDEXDIRS;	/* no support for directory indices */
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
			    fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		size = 1024;
		mp->mnt_gjprovider = malloc(size, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &size,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, size,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			printf("WARNING: %s: GJOURNAL flag on fs "
			    "but no gjournal provider below\n",
			    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBLOCKSIZE)
		bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	ffs_oldfscompat_read(fs, ump, sblockloc);
	fs->fs_ronly = ronly;
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp)) != 0) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		size = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &size,
		    &ump->um_candelete) == 0) {
			if (!ump->um_candelete)
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
			ump->um_candelete = 0;
		}
	}
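	/*
	 * um_candelete is consulted later when blocks are freed; only
	 * when it is set are BIO_DELETE (TRIM) requests sent down to
	 * the underlying provider.
	 */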

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			free(fs->fs_csp, M_UFSMNT);
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
			devvp->v_rdev->si_mountpt = mp;
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem stat information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 * 	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	if (bp)
		brelse(bp);
	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}
	if (ump) {
		mtx_destroy(UFS_MTX(ump));
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	dev_rel(dev);
	return (error);
}

#include <sys/sysctl.h>
static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
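/*
 * debug.bigcgs is a debugging workaround for old filesystems whose
 * cylinder groups were built oversized: while it is set, the compat
 * routines below substitute fs_bsize for fs_cgsize on read and restore
 * the saved value on write.
 */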

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(fs, ump, sblockloc)
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(fs, ump)
	struct fs *fs;
	struct ufsmount *ump;
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(mp, mntflags)
	struct mount *mp;
	int mntflags;
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	susp = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
		susp = fs->fs_ronly == 0;
	}
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && error != ENXIO)
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (error && error != ENXIO) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	DROP_GIANT();
	g_topology_lock();
	if (ump->um_fsckpid > 0) {
		/*
		 * Return to normal read-only mode.
		 */
		error = g_access(ump->um_cp, 0, -1, 0);
		ump->um_fsckpid = 0;
	}
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	if (ump->um_devvp->v_type == VCHR && ump->um_devvp->v_rdev != NULL)
		ump->um_devvp->v_rdev->si_mountpt = NULL;
	vrele(ump->um_devvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots.  The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing.  Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp, 0);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
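	/*
	 * freespace() already deducts the fs_minfree reserve, so f_bavail
	 * can go negative on an over-full filesystem; pending blocks and
	 * inodes are awaiting release by soft updates and are reported
	 * as free here.
	 */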
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = NAME_MAX;
	return (0);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(mp)
	struct mount *mp;
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	struct thread *td;
	int allerror, error;

	allerror = 0;
	td = curthread;
	if ((mp->mnt_flag & MNT_NOATIME) != 0)
		goto qupdate;
	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Test also all the other timestamp flags too, to pick up
		 * any other cases that could be missed.
		 */
		if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
		    IN_UPDATE)) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
		    td)) != 0)
			continue;
		error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}

qupdate:
#ifdef QUOTA
	qsync(mp);
#endif

	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, wait, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	wait = 0;
	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY)
		return (ffs_sync_lazy(mp));

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT) {
		wait = 1;
		lockreq = LK_EXCLUSIVE;
	}
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
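	/*
	 * LK_SLEEPFAIL makes a contested lock request return failure
	 * after sleeping rather than acquiring the lock, since the vnode
	 * may have been reclaimed while we slept; the loop below then
	 * restarts and revalidates it.
	 */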
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
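	/*
	 * These before-and-after counts let softdep_check_suspend()
	 * below detect whether new writes or dependencies appeared while
	 * the sync loop ran, in which case the loop is retried.
	 */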

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Depend on the vnode interlock to keep things stable enough
		 * for a quick test.  Since there might be hundreds of
		 * thousands of vnodes, we cannot afford even a subroutine
		 * call unless there's a good chance that we have work to do.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT || error == ENOLCK) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
			allerror = error;
		vput(vp);
	}
	/*
	 * Force stale filesystem control information to be flushed.
	 */
	if (waitfor == MNT_WAIT) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
			allerror = error;
		/* Flushed work items may create new vnodes to clean */
		if (allerror == 0 && count)
			goto loop;
	}
#ifdef QUOTA
	qsync(mp);
#endif

	devvp = ump->um_devvp;
	bo = &devvp->v_bufobj;
	BO_LOCK(bo);
	if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
		BO_UNLOCK(bo);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(devvp, waitfor, td)) != 0)
			allerror = error;
		VOP_UNLOCK(devvp, 0);
		if (allerror == 0 && waitfor == MNT_WAIT)
			goto loop;
	} else if (suspend != 0) {
		if (softdep_check_suspend(mp,
					  devvp,
					  softdep_deps,
					  softdep_accdeps,
					  secondary_writes,
					  secondary_accwrites) != 0) {
			MNT_IUNLOCK(mp);
			goto loop;	/* More work needed */
		}
		mtx_assert(MNT_MTX(mp), MA_OWNED);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
		suspended = 1;
	} else
		BO_UNLOCK(bo);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
		allerror = error;
	return (allerror);
}

int
ffs_vget(mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (ffs_vgetf(mp, ino, flags, vpp, 0));
}

int
ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
	int ffs_flags;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	struct cdev *dev;
	int error;

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * We must promote to an exclusive lock for vnode creation.  This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such rare case as simultaneous creation of vnode
	 * for same ino by different processes. We just allow them to race
	 * and check later to decide who wins. Let the race begin!
	 */

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	fs = ump->um_fs;
	ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	if (fs->fs_magic == FS_UFS1_MAGIC)
		error = getnewvnode("ufs", mp, &ffs_vnodeops1, &vp);
	else
		error = getnewvnode("ufs", mp, &ffs_vnodeops2, &vp);
	if (error) {
		*vpp = NULL;
		uma_zfree(uma_inode, ip);
		return (error);
	}
	/*
	 * FFS supports recursive locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	VN_LOCK_AREC(vp);
	vp->v_data = ip;
	vp->v_bufobj.bo_bsize = fs->fs_bsize;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_fs = fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_ea_refs = 0;
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

	if (ffs_flags & FFSV_FORCEINSMQ)
		vp->v_vflag |= VV_FORCEINSMQ;
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree(uma_inode, ip);
		*vpp = NULL;
		return (error);
	}
	vp->v_vflag &= ~VV_FORCEINSMQ;
	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	if (ip->i_ump->um_fstype == UFS1)
		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
	else
		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
	ffs_load_inode(bp, ip, fs, ino);
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if (ip->i_ump->um_fstype == UFS1)
		error = ufs_vinit(mp, &ffs_fifoops1, &vp);
	else
		error = ufs_vinit(mp, &ffs_fifoops2, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */
	if (vp->v_type != VFIFO) {
		/* FFS supports shared locking for all files except fifos. */
		VN_LOCK_ASHARE(vp);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
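	/*
	 * Halving the arc4random() result before adding 1 guarantees a
	 * nonzero generation without wrapping back to 0, the value that
	 * marks "not yet assigned".
	 */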
	if (ip->i_gen == 0) {
		ip->i_gen = arc4random() / 2 + 1;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			ip->i_flag |= IN_MODIFIED;
			DIP_SET(ip, i_gen, ip->i_gen);
		}
	}
#ifdef MAC
	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
		/*
		 * If this vnode is already allocated, and we're running
		 * multi-label, attempt to perform a label association
		 * from the extended attributes on the inode.
		 */
		error = mac_vnode_associate_extattr(mp, vp);
		if (error) {
			/* ufs_inactive will release ip->i_devvp ref. */
			vput(vp);
			*vpp = NULL;
			return (error);
		}
	}
#endif

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
1794 */
1795static int
1796ffs_fhtovp(mp, fhp, flags, vpp)
1797	struct mount *mp;
1798	struct fid *fhp;
1799	int flags;
1800	struct vnode **vpp;
1801{
1802	struct ufid *ufhp;
1803	struct fs *fs;
1804
1805	ufhp = (struct ufid *)fhp;
1806	fs = VFSTOUFS(mp)->um_fs;
1807	if (ufhp->ufid_ino < ROOTINO ||
1808	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1809		return (ESTALE);
1810	return (ufs_fhtovp(mp, ufhp, flags, vpp));
1811}
1812
1813/*
1814 * Initialize the filesystem.
1815 */
1816static int
1817ffs_init(vfsp)
1818	struct vfsconf *vfsp;
1819{
1820
1821	ffs_susp_initialize();
1822	softdep_initialize();
1823	return (ufs_init(vfsp));
1824}
1825
1826/*
1827 * Undo the work of ffs_init().
1828 */
1829static int
1830ffs_uninit(vfsp)
1831	struct vfsconf *vfsp;
1832{
1833	int ret;
1834
1835	ret = ufs_uninit(vfsp);
1836	softdep_uninitialize();
1837	ffs_susp_uninitialize();
1838	return (ret);
1839}
1840
1841/*
1842 * Write a superblock and associated information back to disk.
1843 */
1844int
1845ffs_sbupdate(ump, waitfor, suspended)
1846	struct ufsmount *ump;
1847	int waitfor;
1848	int suspended;
1849{
1850	struct fs *fs = ump->um_fs;
1851	struct buf *sbbp;
1852	struct buf *bp;
1853	int blks;
1854	void *space;
1855	int i, size, error, allerror = 0;
1856
1857	if (fs->fs_ronly == 1 &&
1858	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
1859	    (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
1860		panic("ffs_sbupdate: write read-only filesystem");
1861	/*
1862	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
1863	 */
1864	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
1865	    (int)fs->fs_sbsize, 0, 0, 0);
1866	/*
1867	 * First write back the summary information.
1868	 */
1869	blks = howmany(fs->fs_cssize, fs->fs_fsize);
1870	space = fs->fs_csp;
1871	for (i = 0; i < blks; i += fs->fs_frag) {
1872		size = fs->fs_bsize;
1873		if (i + fs->fs_frag > blks)
1874			size = (blks - i) * fs->fs_fsize;
1875		bp = getblk(ump->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1876		    size, 0, 0, 0);
1877		bcopy(space, bp->b_data, (u_int)size);
1878		space = (char *)space + size;
1879		if (suspended)
1880			bp->b_flags |= B_VALIDSUSPWRT;
1881		if (waitfor != MNT_WAIT)
1882			bawrite(bp);
1883		else if ((error = bwrite(bp)) != 0)
1884			allerror = error;
1885	}
1886	/*
1887	 * Now write back the superblock itself. If any errors occurred
1888	 * up to this point, then fail so that the superblock avoids
1889	 * being written out as clean.
1890	 */
1891	if (allerror) {
1892		brelse(sbbp);
1893		return (allerror);
1894	}
1895	bp = sbbp;
1896	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
1897	    (fs->fs_flags & FS_FLAGS_UPDATED) == 0) {
1898		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
1899		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
1900		fs->fs_sblockloc = SBLOCK_UFS1;
1901	}
1902	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
1903	    (fs->fs_flags & FS_FLAGS_UPDATED) == 0) {
1904		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
1905		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
1906		fs->fs_sblockloc = SBLOCK_UFS2;
1907	}
1908	fs->fs_fmod = 0;
1909	fs->fs_time = time_second;
1910	if (MOUNTEDSOFTDEP(ump->um_mountp))
1911		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
1912	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1913	ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
1914	if (suspended)
1915		bp->b_flags |= B_VALIDSUSPWRT;
1916	if (waitfor != MNT_WAIT)
1917		bawrite(bp);
1918	else if ((error = bwrite(bp)) != 0)
1919		allerror = error;
1920	return (allerror);
1921}
1922
1923static int
1924ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
1925	int attrnamespace, const char *attrname)
1926{
1927
1928#ifdef UFS_EXTATTR
1929	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
1930	    attrname));
1931#else
1932	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
1933	    attrname));
1934#endif
1935}
1936
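/*
 * Release an inode, and its UFS1 or UFS2 dinode, back to their
 * UMA zones.
 */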
1937static void
1938ffs_ifree(struct ufsmount *ump, struct inode *ip)
1939{
1940
1941	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
1942		uma_zfree(uma_ufs1, ip->i_din1);
1943	else if (ip->i_din2 != NULL)
1944		uma_zfree(uma_ufs2, ip->i_din2);
1945	uma_zfree(uma_inode, ip);
1946}
1947
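/*
 * Tunable controlling whether buffers marked BX_BKGRDWRITE are copied
 * and written in the background (see ffs_bufwrite() below); it may be
 * toggled at runtime through the debug.dobkgrdwrite sysctl.
 */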
1948static int dobkgrdwrite = 1;
1949SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
1950    "Do background writes (honoring the BX_BKGRDWRITE flag)?");
1951
1952/*
1953 * Complete a background write started from ffs_bufwrite().
1954 */
1955static void
1956ffs_backgroundwritedone(struct buf *bp)
1957{
1958	struct bufobj *bufobj;
1959	struct buf *origbp;
1960
1961	/*
1962	 * Find the original buffer that we are writing.
1963	 */
1964	bufobj = bp->b_bufobj;
1965	BO_LOCK(bufobj);
1966	if ((origbp = gbincore(bufobj, bp->b_lblkno)) == NULL)
1967		panic("backgroundwritedone: lost buffer");
1968	BO_UNLOCK(bufobj);
1969	/*
1970	 * Process dependencies, then move unfinished ones to origbp.
1971	 */
1972	pbrelvp(bp);
1973	if (!LIST_EMPTY(&bp->b_dep))
1974		buf_complete(bp);
1975#ifdef SOFTUPDATES
1976	if (!LIST_EMPTY(&bp->b_dep))
1977		softdep_move_dependencies(bp, origbp);
1978#endif
1979	/*
1980	 * Mark this buffer B_NOCACHE so that it is discarded as soon
1981	 * as bufdone() releases it.
1982	 */
1983	bp->b_flags |= B_NOCACHE;
1984	bp->b_flags &= ~B_CACHE;
1985	bufdone(bp);
1986	BO_LOCK(bufobj);
1987	/*
1988	 * Clear the BV_BKGRDINPROG flag in the original buffer and wake
1989	 * up anyone waiting for the write to complete. If BV_BKGRDINPROG
1990	 * is not set in the original buffer, it must have been released
1991	 * and re-instantiated, which is not legal.
1992	 */
1993	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
1994	    ("backgroundwritedone: lost buffer2"));
1995	origbp->b_vflags &= ~BV_BKGRDINPROG;
1996	if (origbp->b_vflags & BV_BKGRDWAIT) {
1997		origbp->b_vflags &= ~BV_BKGRDWAIT;
1998		wakeup(&origbp->b_xflags);
1999	}
2000	BO_UNLOCK(bufobj);
2001}
2002
2004/*
2005 * Write the buffer and release it on completion (done by iodone
2006 * if the write is asynchronous).  Do not bother writing anything
2007 * if the buffer is invalid.
2008 *
2009 * Note that we set B_CACHE here, indicating that the buffer is
2010 * fully valid and thus cacheable.  This is true even of NFS now,
2011 * so we set it generally.  It could equally be set in biodone()
2012 * since the I/O is synchronous, but we set it here.
2014 */
2015static int
2016ffs_bufwrite(struct buf *bp)
2017{
2018	struct buf *newbp;
2019	int oldflags;
2020
2021	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2022	if (bp->b_flags & B_INVAL) {
2023		brelse(bp);
2024		return (0);
2025	}
2026
2027	oldflags = bp->b_flags;
2028
2029	if (!BUF_ISLOCKED(bp))
2030		panic("bufwrite: buffer is not busy???");
2031	/*
2032	 * If a background write is already in progress, delay
2033	 * writing this block if it is asynchronous. Otherwise
2034	 * wait for the background write to complete.
2035	 */
2036	BO_LOCK(bp->b_bufobj);
2037	if (bp->b_vflags & BV_BKGRDINPROG) {
2038		if (bp->b_flags & B_ASYNC) {
2039			BO_UNLOCK(bp->b_bufobj);
2040			bdwrite(bp);
2041			return (0);
2042		}
2043		bp->b_vflags |= BV_BKGRDWAIT;
2044		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
2045		    "bwrbg", 0);
2046		if (bp->b_vflags & BV_BKGRDINPROG)
2047			panic("bufwrite: still writing");
2048	}
2049	BO_UNLOCK(bp->b_bufobj);
2050
2051	/*
2052	 * If this buffer is marked for background writing and we
2053	 * do not have to wait for it, make a copy and write the
2054	 * copy so as to leave this buffer ready for further use.
2055	 *
2056	 * This optimization eats a lot of memory.  If we have a page
2057	 * or buffer shortfall, we can't do it.
2058	 */
2059	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
2060	    (bp->b_flags & B_ASYNC) &&
2061	    !vm_page_count_severe() &&
2062	    !buf_dirty_count_severe()) {
2063		KASSERT(bp->b_iodone == NULL,
2064		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));
2065
2066		/* Get an anonymous buffer for the copy; do not sleep. */
2067		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
2068		if (newbp == NULL)
2069			goto normal_write;
2070
2071		KASSERT((bp->b_flags & B_UNMAPPED) == 0, ("Unmapped cg"));
2072		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
2073		BO_LOCK(bp->b_bufobj);
2074		bp->b_vflags |= BV_BKGRDINPROG;
2075		BO_UNLOCK(bp->b_bufobj);
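		/*
		 * The copy takes on the original's block identity so the
		 * write lands in the right place; its completion is routed
		 * to ffs_backgroundwritedone().
		 */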
2076		newbp->b_xflags |= BX_BKGRDMARKER;
2077		newbp->b_lblkno = bp->b_lblkno;
2078		newbp->b_blkno = bp->b_blkno;
2079		newbp->b_offset = bp->b_offset;
2080		newbp->b_iodone = ffs_backgroundwritedone;
2081		newbp->b_flags |= B_ASYNC;
2082		newbp->b_flags &= ~B_INVAL;
2083		pbgetvp(bp->b_vp, newbp);
2084
2085#ifdef SOFTUPDATES
2086		/*
2087		 * Move over the dependencies.  If there are rollbacks,
2088		 * leave the parent buffer dirty, as it will need to be
2089		 * written again.
2090		 */
2091		if (LIST_EMPTY(&bp->b_dep) ||
2092		    softdep_move_dependencies(bp, newbp) == 0)
2093			bundirty(bp);
2094#else
2095		bundirty(bp);
2096#endif
2097
2098		/*
2099		 * Initiate the write on the copy and release the original.
2100		 * The BV_BKGRDINPROG flag prevents the original from going
2101		 * away until the background write completes.
2102		 */
2103		bqrelse(bp);
2104		bp = newbp;
2105	} else
2106		/* Mark the buffer clean */
2107		bundirty(bp);
2108
2110	/* Let the normal bufwrite do the rest for us */
2111normal_write:
2112	return (bufwrite(bp));
2113}
2114
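/*
 * Strategy routine for FFS over GEOM-backed devices.  For writes, it
 * enforces the suspension protocol, performs snapshot copy-on-write,
 * and starts soft updates dependency processing before handing the
 * buffer to g_vfs_strategy().
 */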
2116static void
2117ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
2118{
2119	struct vnode *vp;
2120	int error;
2121	struct buf *tbp;
2122	int nocopy;
2123
2124	vp = bo->__bo_vnode;
2125	if (bp->b_iocmd == BIO_WRITE) {
2126		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
2127		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
2128		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2129			panic("ffs_geom_strategy: bad I/O");
2130		nocopy = bp->b_flags & B_NOCOPY;
2131		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
2132		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
2133		    vp->v_rdev->si_snapdata != NULL) {
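			/*
			 * The device has active snapshots, so give each
			 * buffer in a cluster (or the lone buffer) a chance
			 * to be copied on write before it goes to disk.
			 */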
2134			if ((bp->b_flags & B_CLUSTER) != 0) {
2135				runningbufwakeup(bp);
2136				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2137					      b_cluster.cluster_entry) {
2138					error = ffs_copyonwrite(vp, tbp);
2139					if (error != 0 &&
2140					    error != EOPNOTSUPP) {
2141						bp->b_error = error;
2142						bp->b_ioflags |= BIO_ERROR;
2143						bufdone(bp);
2144						return;
2145					}
2146				}
2147				bp->b_runningbufspace = bp->b_bufsize;
2148				atomic_add_long(&runningbufspace,
2149					       bp->b_runningbufspace);
2150			} else {
2151				error = ffs_copyonwrite(vp, bp);
2152				if (error != 0 && error != EOPNOTSUPP) {
2153					bp->b_error = error;
2154					bp->b_ioflags |= BIO_ERROR;
2155					bufdone(bp);
2156					return;
2157				}
2158			}
2159		}
2160#ifdef SOFTUPDATES
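		/*
		 * Start soft updates dependency processing (including any
		 * rollbacks) before the write is handed to GEOM.
		 */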
2161		if ((bp->b_flags & B_CLUSTER) != 0) {
2162			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
2163				      b_cluster.cluster_entry) {
2164				if (!LIST_EMPTY(&tbp->b_dep))
2165					buf_start(tbp);
2166			}
2167		} else {
2168			if (!LIST_EMPTY(&bp->b_dep))
2169				buf_start(bp);
2170		}
2172#endif
2173	}
2174	g_vfs_strategy(bo, bp);
2175}
2176
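/*
 * Return 1 if the given mount point is served by this filesystem
 * (i.e. its vfsops are ufs_vfsops), and 0 otherwise.
 */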
2177int
2178ffs_own_mount(const struct mount *mp)
2179{
2180
2181	if (mp->mnt_op == &ufs_vfsops)
2182		return (1);
2183	return (0);
2184}
2185
2186#ifdef	DDB
2187#ifdef SOFTUPDATES
2188
2189/* defined in ffs_softdep.c */
2190extern void db_print_ffs(struct ufsmount *ump);
2191
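/*
 * DDB "show ffs" command: with an address argument, print the state
 * of that mount point; without one, print every FFS mount in the
 * system.
 */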
2192DB_SHOW_COMMAND(ffs, db_show_ffs)
2193{
2194	struct mount *mp;
2195	struct ufsmount *ump;
2196
2197	if (have_addr) {
2198		ump = VFSTOUFS((struct mount *)addr);
2199		db_print_ffs(ump);
2200		return;
2201	}
2202
2203	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2204		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
2205			db_print_ffs(VFSTOUFS(mp));
2206	}
2207}
2208
2209#endif	/* SOFTUPDATES */
2210#endif	/* DDB */
2211