ffs_vfsops.c revision 69974
1/*
2 * Copyright (c) 1989, 1991, 1993, 1994
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by the University of
16 *	California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
34 * $FreeBSD: head/sys/ufs/ffs/ffs_vfsops.c 69974 2000-12-13 10:04:01Z tanimura $
35 */
36
37#include "opt_ffs.h"
38#include "opt_quota.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/namei.h>
43#include <sys/proc.h>
44#include <sys/kernel.h>
45#include <sys/vnode.h>
46#include <sys/mount.h>
47#include <sys/bio.h>
48#include <sys/buf.h>
49#include <sys/conf.h>
50#include <sys/fcntl.h>
51#include <sys/disklabel.h>
52#include <sys/malloc.h>
53
54#include <machine/mutex.h>
55
56#include <ufs/ufs/extattr.h>
57#include <ufs/ufs/quota.h>
58#include <ufs/ufs/ufsmount.h>
59#include <ufs/ufs/inode.h>
60#include <ufs/ufs/ufs_extern.h>
61
62#include <ufs/ffs/fs.h>
63#include <ufs/ffs/ffs_extern.h>
64
65#include <vm/vm.h>
66#include <vm/vm_page.h>
67
68static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part");
69
70static int	ffs_sbupdate __P((struct ufsmount *, int));
71int	ffs_reload __P((struct mount *,struct ucred *,struct proc *));
72static int	ffs_oldfscompat __P((struct fs *));
73static int	ffs_init __P((struct vfsconf *));
74
75static struct vfsops ufs_vfsops = {
76	ffs_mount,
77	ufs_start,
78	ffs_unmount,
79	ufs_root,
80	ufs_quotactl,
81	ffs_statfs,
82	ffs_sync,
83	ffs_vget,
84	ffs_fhtovp,
85	ufs_check_export,
86	ffs_vptofh,
87	ffs_init,
88	vfs_stduninit,
89#ifdef FFS_EXTATTR
90	ufs_extattrctl,
91#else
92	vfs_stdextattrctl,
93#endif
94};
95
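/*
 * Register this vfsops vector with the kernel under the name "ufs".
 */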
96VFS_SET(ufs_vfsops, ufs, 0);
97
98/*
99 * ffs_mount
100 *
101 * Called when mounting local physical media
102 *
103 * PARAMETERS:
104 *		mountroot
105 *			mp	mount point structure
106 *			path	NULL (flag for root mount!!!)
107 *			data	<unused>
108 *			ndp	<unused>
109 *			p	process (user credentials check [statfs])
110 *
111 *		mount
112 *			mp	mount point structure
113 *			path	path to mount point
114 *			data	pointer to argument struct in user space
115 *			ndp	mount point namei() return (used for
116 *				credentials on reload), reused to look
117 *				up block device.
118 *			p	process (user credentials check)
119 *
120 * RETURNS:	0	Success
121 *		!0	error number (errno.h)
122 *
123 * LOCK STATE:
124 *
125 *		ENTRY
126 *			mount point is locked
127 *		EXIT
128 *			mount point is locked
129 *
130 * NOTES:
131 *		A NULL path can be used for a flag since the mount
132 *		system call will fail with EFAULT in copyinstr in
133 *		namei() if it is a genuine NULL from the user.
134 */
135int
136ffs_mount(mp, path, data, ndp, p)
137        struct mount		*mp;	/* mount struct pointer*/
138        char			*path;	/* path to mount point*/
139        caddr_t			data;	/* arguments to FS specific mount*/
140        struct nameidata	*ndp;	/* mount point credentials*/
141        struct proc		*p;	/* process requesting mount*/
142{
143	size_t		size;
144	struct vnode	*devvp;
145	struct ufs_args args;
146	struct ufsmount *ump = 0;
147	register struct fs *fs;
148	int error, flags;
149	mode_t accessmode;
150
151	/*
152	 * Use NULL path to indicate we are mounting the root file system.
153	 */
154	if (path == NULL) {
155		if ((error = bdevvp(rootdev, &rootvp))) {
156			printf("ffs_mountroot: can't find rootvp\n");
157			return (error);
158		}
159
160		if ((error = ffs_mountfs(rootvp, mp, p, M_FFSNODE)) != 0)
161			return (error);
162
163		(void)VFS_STATFS(mp, &mp->mnt_stat, p);
164		return (0);
165	}
166
167	/*
168	 * Mounting non-root file system or updating a file system
169	 */
170	if ((error = copyin(data, (caddr_t)&args, sizeof(struct ufs_args)))!= 0)
171		return (error);
172
173	/*
174	 * If updating, check whether changing from read-only to
175	 * read/write; if there is no device name, that's all we do.
176	 */
177	if (mp->mnt_flag & MNT_UPDATE) {
178		ump = VFSTOUFS(mp);
179		fs = ump->um_fs;
180		devvp = ump->um_devvp;
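		/*
		 * Going from read-write to read-only: flush all dirty
		 * files, mark the filesystem clean if possible, and
		 * push the updated superblock out to disk.
		 */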
181		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
182			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
183				return (error);
184			flags = WRITECLOSE;
185			if (mp->mnt_flag & MNT_FORCE)
186				flags |= FORCECLOSE;
187			if (mp->mnt_flag & MNT_SOFTDEP) {
188				error = softdep_flushfiles(mp, flags, p);
189			} else {
190				error = ffs_flushfiles(mp, flags, p);
191			}
192			if (error) {
193				vn_finished_write(mp);
194				return (error);
195			}
196			fs->fs_ronly = 1;
197			if ((fs->fs_flags & FS_UNCLEAN) == 0)
198				fs->fs_clean = 1;
199			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
200				fs->fs_ronly = 0;
201				fs->fs_clean = 0;
202				vn_finished_write(mp);
203				return (error);
204			}
205			vn_finished_write(mp);
206		}
207		if ((mp->mnt_flag & MNT_RELOAD) &&
208		    (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)) != 0)
209			return (error);
210		if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
211			/*
212			 * If upgrade to read-write by non-root, then verify
213			 * that user has necessary permissions on the device.
214			 */
215			if (p->p_ucred->cr_uid != 0) {
216				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
217				if ((error = VOP_ACCESS(devvp, VREAD | VWRITE,
218				    p->p_ucred, p)) != 0) {
219					VOP_UNLOCK(devvp, 0, p);
220					return (error);
221				}
222				VOP_UNLOCK(devvp, 0, p);
223			}
224			fs->fs_flags &= ~FS_UNCLEAN;
225			if (fs->fs_clean == 0) {
226				fs->fs_flags |= FS_UNCLEAN;
227				if (mp->mnt_flag & MNT_FORCE) {
228					printf("WARNING: %s was not %s\n",
229					   fs->fs_fsmnt, "properly dismounted");
230				} else {
231					printf(
232"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
233					    fs->fs_fsmnt);
234					return (EPERM);
235				}
236			}
237			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
238				return (error);
239			fs->fs_ronly = 0;
240			fs->fs_clean = 0;
241			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
242				vn_finished_write(mp);
243				return (error);
244			}
245			/* check to see if we need to start softdep */
246			if ((fs->fs_flags & FS_DOSOFTDEP) &&
247			    (error = softdep_mount(devvp, mp, fs, p->p_ucred))){
248				vn_finished_write(mp);
249				return (error);
250			}
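			/*
			 * If the superblock records any snapshots,
			 * activate them now that the filesystem is
			 * writable.
			 */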
251			if (fs->fs_snapinum[0] != 0)
252				ffs_snapshot_mount(mp);
253			vn_finished_write(mp);
254		}
255		/*
256		 * Soft updates is incompatible with "async",
257		 * so if we are doing soft updates, stop the user
258		 * from setting the async flag in an update.
259		 * Softdep_mount() clears it in an initial mount
260		 * or ro->rw remount.
261		 */
262		if (mp->mnt_flag & MNT_SOFTDEP)
263			mp->mnt_flag &= ~MNT_ASYNC;
264		/*
265		 * If not updating name, process export requests.
266		 */
267		if (args.fspec == 0)
268			return (vfs_export(mp, &ump->um_export, &args.export));
269		/*
270		 * If this is a snapshot request, take the snapshot.
271		 */
272		if (mp->mnt_flag & MNT_SNAPSHOT)
273			return (ffs_snapshot(mp, args.fspec));
274	}
275
276	/*
277	 * Not an update, or updating the name: look up the name
278	 * and verify that it refers to a sensible block device.
279	 */
280	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
281	if ((error = namei(ndp)) != 0)
282		return (error);
283	NDFREE(ndp, NDF_ONLY_PNBUF);
284	devvp = ndp->ni_vp;
285	if (!vn_isdisk(devvp, &error)) {
286		vrele(devvp);
287		return (error);
288	}
289
290	/*
291	 * If mount by non-root, then verify that user has necessary
292	 * permissions on the device.
293	 */
294	if (p->p_ucred->cr_uid != 0) {
295		accessmode = VREAD;
296		if ((mp->mnt_flag & MNT_RDONLY) == 0)
297			accessmode |= VWRITE;
298		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
299		if ((error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p))!= 0){
300			vput(devvp);
301			return (error);
302		}
303		VOP_UNLOCK(devvp, 0, p);
304	}
305
306	if (mp->mnt_flag & MNT_UPDATE) {
307		/*
308		 * Update only
309		 *
310		 * If it is not the same vnode, or at least a vnode on
311		 * the same device, then this update request is invalid.
312		 */
313
314		if (devvp != ump->um_devvp &&
315		    devvp->v_rdev != ump->um_devvp->v_rdev)
316			error = EINVAL;	/* needs translation */
317		vrele(devvp);
318		if (error)
319			return (error);
320	} else {
321		/*
322		 * New mount
323		 *
324		 * We need the name for the mount point (also used for
325		 * "last mounted on") copied in. If an error occurs,
326		 * the mount point is discarded by the upper level code.
327		 */
328		copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
329		bzero( mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
330		if ((error = ffs_mountfs(devvp, mp, p, M_FFSNODE)) != 0) {
331			vrele(devvp);
332			return (error);
333		}
334	}
335	/*
336	 * Save "mounted from" device name info for mount point (NULL pad).
337	 */
338	copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
339	bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
340	/*
341	 * Initialize filesystem stat information in mount struct.
342	 */
343	(void)VFS_STATFS(mp, &mp->mnt_stat, p);
344	return (0);
345}
346
347/*
348 * Reload all incore data for a filesystem (used after running fsck on
349 * the root filesystem and finding things to fix). The filesystem must
350 * be mounted read-only.
351 *
352 * Things to do to update the mount:
353 *	1) invalidate all cached meta-data.
354 *	2) re-read superblock from disk.
355 *	3) re-read summary information from disk.
356 *	4) invalidate all inactive vnodes.
357 *	5) invalidate all cached file data.
358 *	6) re-read inode data for all active vnodes.
359 */
360int
361ffs_reload(mp, cred, p)
362	register struct mount *mp;
363	struct ucred *cred;
364	struct proc *p;
365{
366	register struct vnode *vp, *nvp, *devvp;
367	struct inode *ip;
368	struct csum *space;
369	struct buf *bp;
370	struct fs *fs, *newfs;
371	struct partinfo dpart;
372	dev_t dev;
373	int i, blks, size, error;
374	int32_t *lp;
375
376	if ((mp->mnt_flag & MNT_RDONLY) == 0)
377		return (EINVAL);
378	/*
379	 * Step 1: invalidate all cached meta-data.
380	 */
381	devvp = VFSTOUFS(mp)->um_devvp;
382	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
383	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
384	VOP_UNLOCK(devvp, 0, p);
385	if (error)
386		panic("ffs_reload: dirty1");
387
388	dev = devvp->v_rdev;
389
390	/*
391	 * Only VMIO the backing device if the backing device is a real
392	 * block device.  See ffs_mountmfs() for more details.
393	 */
394	if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
395		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
396		vfs_object_create(devvp, p, p->p_ucred);
397		mtx_enter(&devvp->v_interlock, MTX_DEF);
398		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
399	}
400
401	/*
402	 * Step 2: re-read superblock from disk.
403	 */
404	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
405		size = DEV_BSIZE;
406	else
407		size = dpart.disklab->d_secsize;
408	if ((error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) != 0)
409		return (error);
410	newfs = (struct fs *)bp->b_data;
411	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
412		newfs->fs_bsize < sizeof(struct fs)) {
413			brelse(bp);
414			return (EIO);		/* XXX needs translation */
415	}
416	fs = VFSTOUFS(mp)->um_fs;
417	/*
418	 * Copy pointer fields back into superblock before copying in	XXX
419	 * new superblock. These should really be in the ufsmount.	XXX
420	 * Note that important parameters (eg fs_ncg) are unchanged.
421	 */
422	bcopy(&fs->fs_csp[0], &newfs->fs_csp[0], sizeof(fs->fs_csp));
423	newfs->fs_maxcluster = fs->fs_maxcluster;
424	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
425	if (fs->fs_sbsize < SBSIZE)
426		bp->b_flags |= B_INVAL | B_NOCACHE;
427	brelse(bp);
428	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
429	ffs_oldfscompat(fs);
430
431	/*
432	 * Step 3: re-read summary information from disk.
433	 */
434	blks = howmany(fs->fs_cssize, fs->fs_fsize);
435	space = fs->fs_csp[0];
436	for (i = 0; i < blks; i += fs->fs_frag) {
437		size = fs->fs_bsize;
438		if (i + fs->fs_frag > blks)
439			size = (blks - i) * fs->fs_fsize;
440		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
441		    NOCRED, &bp);
442		if (error)
443			return (error);
444		bcopy(bp->b_data, fs->fs_csp[fragstoblks(fs, i)], (u_int)size);
445		brelse(bp);
446	}
447	/*
448	 * We no longer know anything about clusters per cylinder group.
449	 */
450	if (fs->fs_contigsumsize > 0) {
451		lp = fs->fs_maxcluster;
452		for (i = 0; i < fs->fs_ncg; i++)
453			*lp++ = fs->fs_contigsumsize;
454	}
455
456loop:
457	simple_lock(&mntvnode_slock);
458	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
459		if (vp->v_mount != mp) {
460			simple_unlock(&mntvnode_slock);
461			goto loop;
462		}
463		nvp = vp->v_mntvnodes.le_next;
464		/*
465		 * Step 4: invalidate all inactive vnodes.
466		 */
467		if (vrecycle(vp, &mntvnode_slock, p))
468			goto loop;
469		/*
470		 * Step 5: invalidate all cached file data.
471		 */
472		mtx_enter(&vp->v_interlock, MTX_DEF);
473		simple_unlock(&mntvnode_slock);
474		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
475			goto loop;
476		}
477		if (vinvalbuf(vp, 0, cred, p, 0, 0))
478			panic("ffs_reload: dirty2");
479		/*
480		 * Step 6: re-read inode data for all active vnodes.
481		 */
482		ip = VTOI(vp);
483		error =
484		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
485		    (int)fs->fs_bsize, NOCRED, &bp);
486		if (error) {
487			vput(vp);
488			return (error);
489		}
490		ip->i_din = *((struct dinode *)bp->b_data +
491		    ino_to_fsbo(fs, ip->i_number));
492		ip->i_effnlink = ip->i_nlink;
493		brelse(bp);
494		vput(vp);
495		simple_lock(&mntvnode_slock);
496	}
497	simple_unlock(&mntvnode_slock);
498	return (0);
499}
500
501/*
502 * Common code for mount and mountroot
503 */
504int
505ffs_mountfs(devvp, mp, p, malloctype)
506	register struct vnode *devvp;
507	struct mount *mp;
508	struct proc *p;
509	struct malloc_type *malloctype;
510{
511	register struct ufsmount *ump;
512	struct buf *bp;
513	register struct fs *fs;
514	dev_t dev;
515	struct partinfo dpart;
516	caddr_t base, space;
517	int error, i, blks, size, ronly;
518	int32_t *lp;
519	struct ucred *cred;
520	u_int64_t maxfilesize;					/* XXX */
521	size_t strsize;
522	int ncount;
523
524	dev = devvp->v_rdev;
525	cred = p ? p->p_ucred : NOCRED;
526	/*
527	 * Disallow multiple mounts of the same device.
528	 * Disallow mounting of a device that is currently in use
529	 * (except for root, which might share swap device for miniroot).
530	 * Flush out any old buffers remaining from a previous use.
531	 */
532	error = vfs_mountedon(devvp);
533	if (error)
534		return (error);
535	ncount = vcount(devvp);
536
537	if (ncount > 1 && devvp != rootvp)
538		return (EBUSY);
539	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
540	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
541	VOP_UNLOCK(devvp, 0, p);
542	if (error)
543		return (error);
544
545	/*
546	 * Only VMIO the backing device if the backing device is a real
547	 * block device.  This excludes the original MFS implementation.
548	 * Note that it is optional that the backing device be VMIOed.  This
549	 * increases the opportunity for metadata caching.
550	 */
551	if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
552		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
553		vfs_object_create(devvp, p, cred);
554		mtx_enter(&devvp->v_interlock, MTX_DEF);
555		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
556	}
557
558	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
559	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
560	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
561	VOP_UNLOCK(devvp, 0, p);
562	if (error)
563		return (error);
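	/*
	 * Let the mount's maximum I/O size track the device's limit,
	 * but never let it exceed MAXPHYS.
	 */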
564	if (devvp->v_rdev->si_iosize_max > mp->mnt_iosize_max)
565		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
566	if (mp->mnt_iosize_max > MAXPHYS)
567		mp->mnt_iosize_max = MAXPHYS;
568
569	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
570		size = DEV_BSIZE;
571	else
572		size = dpart.disklab->d_secsize;
573
574	bp = NULL;
575	ump = NULL;
576	if ((error = bread(devvp, SBLOCK, SBSIZE, cred, &bp)) != 0)
577		goto out;
578	fs = (struct fs *)bp->b_data;
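	/*
	 * Sanity-check the superblock: the magic number must match and
	 * the block size must be within reasonable bounds.
	 */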
579	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
580	    fs->fs_bsize < sizeof(struct fs)) {
581		error = EINVAL;		/* XXX needs translation */
582		goto out;
583	}
584	fs->fs_fmod = 0;
585	fs->fs_flags &= ~FS_UNCLEAN;
586	if (fs->fs_clean == 0) {
587		fs->fs_flags |= FS_UNCLEAN;
588		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
589			printf(
590"WARNING: %s was not properly dismounted\n",
591			    fs->fs_fsmnt);
592		} else {
593			printf(
594"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
595			    fs->fs_fsmnt);
596			error = EPERM;
597			goto out;
598		}
599	}
600	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
601	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
602		error = EROFS;          /* needs translation */
603		goto out;
604	}
605	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
606	ump->um_malloctype = malloctype;
607	ump->um_i_effnlink_valid = 1;
608	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
609	    M_WAITOK);
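	/*
	 * Install the FFS implementations of the UFS primitives so
	 * that the generic UFS code calls back into FFS.
	 */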
610	ump->um_blkatoff = ffs_blkatoff;
611	ump->um_truncate = ffs_truncate;
612	ump->um_update = ffs_update;
613	ump->um_valloc = ffs_valloc;
614	ump->um_vfree = ffs_vfree;
615	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
616	if (fs->fs_sbsize < SBSIZE)
617		bp->b_flags |= B_INVAL | B_NOCACHE;
618	brelse(bp);
619	bp = NULL;
620	fs = ump->um_fs;
621	fs->fs_ronly = ronly;
622	size = fs->fs_cssize;
623	blks = howmany(size, fs->fs_fsize);
624	if (fs->fs_contigsumsize > 0)
625		size += fs->fs_ncg * sizeof(int32_t);
626	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
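	/*
	 * Read the cylinder group summary information into the newly
	 * allocated buffer, one filesystem block at a time.
	 */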
627	for (i = 0; i < blks; i += fs->fs_frag) {
628		size = fs->fs_bsize;
629		if (i + fs->fs_frag > blks)
630			size = (blks - i) * fs->fs_fsize;
631		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
632		    cred, &bp)) != 0) {
633			free(base, M_UFSMNT);
634			goto out;
635		}
636		bcopy(bp->b_data, space, (u_int)size);
637		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
638		space += size;
639		brelse(bp);
640		bp = NULL;
641	}
642	if (fs->fs_contigsumsize > 0) {
643		fs->fs_maxcluster = lp = (int32_t *)space;
644		for (i = 0; i < fs->fs_ncg; i++)
645			*lp++ = fs->fs_contigsumsize;
646	}
647	mp->mnt_data = (qaddr_t)ump;
648	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
649	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
650	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
651	    vfs_getvfs(&mp->mnt_stat.f_fsid))
652		vfs_getnewfsid(mp);
653	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
654	mp->mnt_flag |= MNT_LOCAL;
655	ump->um_mountp = mp;
656	ump->um_dev = dev;
657	ump->um_devvp = devvp;
658	ump->um_nindir = fs->fs_nindir;
659	ump->um_bptrtodb = fs->fs_fsbtodb;
660	ump->um_seqinc = fs->fs_frag;
661	for (i = 0; i < MAXQUOTAS; i++)
662		ump->um_quotas[i] = NULLVP;
663#ifdef FFS_EXTATTR
664	ufs_extattr_uepm_init(&ump->um_extattr);
665#endif
666	devvp->v_rdev->si_mountpoint = mp;
667	ffs_oldfscompat(fs);
668
669	/*
670	 * Set FS local "last mounted on" information (NULL pad)
671	 */
672	copystr(	mp->mnt_stat.f_mntonname,	/* mount point*/
673			fs->fs_fsmnt,			/* copy area*/
674			sizeof(fs->fs_fsmnt) - 1,	/* max size*/
675			&strsize);			/* real size*/
676	bzero( fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);
677
678	if( mp->mnt_flag & MNT_ROOTFS) {
679		/*
680		 * Root mount; update timestamp in mount structure.
681		 * this will be used by the common root mount code
682		 * to update the system clock.
683		 */
684		mp->mnt_time = fs->fs_time;
685	}
686
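	/*
	 * Clamp the maximum file size to what this implementation can
	 * address; the on-disk value is saved so that ffs_sbupdate()
	 * can restore it when the superblock is written back.
	 */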
687	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
688	maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1;	/* XXX */
689	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
690		fs->fs_maxfilesize = maxfilesize;		/* XXX */
691	if (ronly == 0) {
692		if ((fs->fs_flags & FS_DOSOFTDEP) &&
693		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
694			free(base, M_UFSMNT);
695			goto out;
696		}
697		if (fs->fs_snapinum[0] != 0)
698			ffs_snapshot_mount(mp);
699		fs->fs_fmod = 1;
700		fs->fs_clean = 0;
701		(void) ffs_sbupdate(ump, MNT_WAIT);
702	}
703#ifdef FFS_EXTATTR
704	/*
705	 * XXX Auto-starting of EAs would go here.
706	 *
707	 * Auto-starting would:
708	 *	- check for /.attribute in the fs, and extattr_start if so
709	 *	- for each file in .attribute, enable that file with
710	 * 	  an attribute of the same name.
711	 * Not clear how to report errors -- probably eat them.
712	 * This would all happen while the file system was busy/not
713	 * available, so would effectively be "atomic".
714	 */
715	/* ufs_extattr_autostart(mp, ump); */
716#endif
717	return (0);
718out:
719	devvp->v_rdev->si_mountpoint = NULL;
720	if (bp)
721		brelse(bp);
722	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
723	if (ump) {
724		free(ump->um_fs, M_UFSMNT);
725		free(ump, M_UFSMNT);
726		mp->mnt_data = (qaddr_t)0;
727	}
728	return (error);
729}
730
731/*
732 * Sanity checks for old file systems.
733 *
734 * XXX - goes away some day.
735 */
736static int
737ffs_oldfscompat(fs)
738	struct fs *fs;
739{
740
741	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
742	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
743	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
744		fs->fs_nrpos = 8;				/* XXX */
745	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
746#if 0
747		int i;						/* XXX */
748		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
749								/* XXX */
750		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
751		for (i = 0; i < NIADDR; i++) {			/* XXX */
752			sizepb *= NINDIR(fs);			/* XXX */
753			fs->fs_maxfilesize += sizepb;		/* XXX */
754		}						/* XXX */
755#endif
756		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
757		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
758		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
759	}							/* XXX */
760	return (0);
761}
762
763/*
764 * unmount system call
765 */
766int
767ffs_unmount(mp, mntflags, p)
768	struct mount *mp;
769	int mntflags;
770	struct proc *p;
771{
772	register struct ufsmount *ump = VFSTOUFS(mp);
773	register struct fs *fs;
774	int error, flags;
775
776	flags = 0;
777	if (mntflags & MNT_FORCE) {
778		flags |= FORCECLOSE;
779	}
780#ifdef FFS_EXTATTR
781	if ((error = ufs_extattr_stop(mp, p)))
782		if (error != EOPNOTSUPP)
783			printf("ffs_unmount: ufs_extattr_stop returned %d\n",
784			    error);
785	ufs_extattr_uepm_destroy(&ump->um_extattr);
786#endif
787	if (mp->mnt_flag & MNT_SOFTDEP) {
788		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
789			return (error);
790	} else {
791		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
792			return (error);
793	}
794	fs = ump->um_fs;
795	if (fs->fs_ronly == 0) {
796		fs->fs_clean = fs->fs_flags & FS_UNCLEAN ? 0 : 1;
797		error = ffs_sbupdate(ump, MNT_WAIT);
798		if (error) {
799			fs->fs_clean = 0;
800			return (error);
801		}
802	}
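	/*
	 * Detach from the device: flush any remaining buffers, close
	 * the device, and drop our reference to its vnode.
	 */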
803	ump->um_devvp->v_rdev->si_mountpoint = NULL;
804
805	vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0);
806	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
807		NOCRED, p);
808
809	vrele(ump->um_devvp);
810
811	free(fs->fs_csp[0], M_UFSMNT);
812	free(fs, M_UFSMNT);
813	free(ump, M_UFSMNT);
814	mp->mnt_data = (qaddr_t)0;
815	mp->mnt_flag &= ~MNT_LOCAL;
816	return (error);
817}
818
819/*
820 * Flush out all the files in a filesystem.
821 */
822int
823ffs_flushfiles(mp, flags, p)
824	register struct mount *mp;
825	int flags;
826	struct proc *p;
827{
828	register struct ufsmount *ump;
829	int error;
830
831	ump = VFSTOUFS(mp);
832#ifdef QUOTA
833	if (mp->mnt_flag & MNT_QUOTA) {
834		int i;
835		error = vflush(mp, NULLVP, SKIPSYSTEM|flags);
836		if (error)
837			return (error);
838		for (i = 0; i < MAXQUOTAS; i++) {
839			if (ump->um_quotas[i] == NULLVP)
840				continue;
841			quotaoff(p, mp, i);
842		}
843		/*
844		 * Here we fall through to vflush again to ensure
845		 * that we have gotten rid of all the system vnodes.
846		 */
847	}
848#endif
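	/*
	 * If snapshots are active (the device is marked copy-on-write),
	 * flush the regular files first and then release the snapshots.
	 */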
849	if (ump->um_devvp->v_flag & VCOPYONWRITE) {
850		if ((error = vflush(mp, NULL, SKIPSYSTEM | flags)) != 0)
851			return (error);
852		ffs_snapshot_unmount(mp);
853		/*
854		 * Here we fall through to vflush again to ensure
855		 * that we have gotten rid of all the system vnodes.
856		 */
857	}
858        /*
859	 * Flush all the files.
860	 */
861	if ((error = vflush(mp, NULL, flags)) != 0)
862		return (error);
863	/*
864	 * Flush filesystem metadata.
865	 */
866	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
867	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p);
868	VOP_UNLOCK(ump->um_devvp, 0, p);
869	return (error);
870}
871
872/*
873 * Get file system statistics.
874 */
875int
876ffs_statfs(mp, sbp, p)
877	struct mount *mp;
878	register struct statfs *sbp;
879	struct proc *p;
880{
881	register struct ufsmount *ump;
882	register struct fs *fs;
883
884	ump = VFSTOUFS(mp);
885	fs = ump->um_fs;
886	if (fs->fs_magic != FS_MAGIC)
887		panic("ffs_statfs");
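	/*
	 * Note that f_bsize reports the fragment size, while f_iosize
	 * reports the optimal I/O size (the filesystem block size).
	 */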
888	sbp->f_bsize = fs->fs_fsize;
889	sbp->f_iosize = fs->fs_bsize;
890	sbp->f_blocks = fs->fs_dsize;
891	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
892		fs->fs_cstotal.cs_nffree;
893	sbp->f_bavail = freespace(fs, fs->fs_minfree);
894	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
895	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
896	if (sbp != &mp->mnt_stat) {
897		sbp->f_type = mp->mnt_vfc->vfc_typenum;
898		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
899			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
900		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
901			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
902	}
903	return (0);
904}
905
906/*
907 * Go through the disk queues to initiate sandbagged IO;
908 * go through the inodes to write those that have been modified;
909 * initiate the writing of the super block if it has been modified.
910 *
911 * Note: we are always called with the filesystem marked `MPBUSY'.
912 */
913int
914ffs_sync(mp, waitfor, cred, p)
915	struct mount *mp;
916	int waitfor;
917	struct ucred *cred;
918	struct proc *p;
919{
920	struct vnode *nvp, *vp;
921	struct inode *ip;
922	struct ufsmount *ump = VFSTOUFS(mp);
923	struct fs *fs;
924	int error, count, wait, lockreq, allerror = 0;
925
926	fs = ump->um_fs;
927	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
928		printf("fs = %s\n", fs->fs_fsmnt);
929		panic("ffs_sync: rofs mod");
930	}
931	/*
932	 * Write back each (modified) inode.
933	 */
934	wait = 0;
935	lockreq = LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK;
936	if (waitfor == MNT_WAIT) {
937		wait = 1;
938		lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
939	}
940	simple_lock(&mntvnode_slock);
941loop:
942	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
943		/*
944		 * If the vnode that we are about to sync is no longer
945		 * associated with this mount point, start over.
946		 */
947		if (vp->v_mount != mp)
948			goto loop;
949		mtx_enter(&vp->v_interlock, MTX_DEF);
950		nvp = vp->v_mntvnodes.le_next;
951		ip = VTOI(vp);
952		if (vp->v_type == VNON || ((ip->i_flag &
953		     (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
954		     TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
955			mtx_exit(&vp->v_interlock, MTX_DEF);
956			continue;
957		}
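		/*
		 * Regular vnodes are synced with VOP_FSYNC(); device
		 * vnodes belonging to this filesystem only need their
		 * inode times pushed out via UFS_UPDATE().
		 */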
958		if (vp->v_type != VCHR) {
959			simple_unlock(&mntvnode_slock);
960			if ((error = vget(vp, lockreq, p)) != 0) {
961				simple_lock(&mntvnode_slock);
962				if (error == ENOENT)
963					goto loop;
964				continue;
965			}
966			if ((error = VOP_FSYNC(vp, cred, waitfor, p)) != 0)
967				allerror = error;
968			VOP_UNLOCK(vp, 0, p);
969			vrele(vp);
970			simple_lock(&mntvnode_slock);
971		} else {
972			simple_unlock(&mntvnode_slock);
973			mtx_exit(&vp->v_interlock, MTX_DEF);
974			UFS_UPDATE(vp, wait);
975			simple_lock(&mntvnode_slock);
976		}
977	}
978	simple_unlock(&mntvnode_slock);
979	/*
980	 * Force stale file system control information to be flushed.
981	 */
982	if (waitfor == MNT_WAIT) {
983		if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
984			allerror = error;
985		/* Flushed work items may create new vnodes to clean */
986		if (count) {
987			simple_lock(&mntvnode_slock);
988			goto loop;
989		}
990	}
991	if (waitfor == MNT_NOWAIT) {
992		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
993		if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p)) != 0)
994			allerror = error;
995		VOP_UNLOCK(ump->um_devvp, 0, p);
996	}
997#ifdef QUOTA
998	qsync(mp);
999#endif
1000	/*
1001	 * Write back modified superblock.
1002	 */
1003	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
1004		allerror = error;
1005	return (allerror);
1006}
1007
1008/*
1009 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1010 * in from disk.  If it is in core, wait for the lock bit to clear, then
1011 * return the inode locked.  Detection and handling of mount points must be
1012 * done by the calling routine.
1013 */
1014static int ffs_inode_hash_lock;
1015/*
1016 * ffs_inode_hash_lock is a variable to manage mutual exclusion
1017 * of vnode allocation and insertion into the hash, especially to
1018 * avoid holding more than one vnode for the same inode in the
1019 * hash table. ffs_inode_hash_lock must hence be tested-and-set
1020 * or cleared atomically, accomplished by ffs_inode_hash_mtx.
1021 *
1022 * As vnode allocation may block during MALLOC() and zone
1023 * allocation, we should also do msleep() to give away the CPU
1024 * if anyone else is allocating a vnode. lockmgr is not suitable
1025 * here because someone else may insert into the hash table the
1026 * vnode we are trying to allocate during our sleep, in which
1027 * case the hash table needs to be examined once again after
1028 * waking up.
1029 */
1030static struct mtx ffs_inode_hash_mtx;
1031
1032int
1033ffs_vget(mp, ino, vpp)
1034	struct mount *mp;
1035	ino_t ino;
1036	struct vnode **vpp;
1037{
1038	struct fs *fs;
1039	struct inode *ip;
1040	struct ufsmount *ump;
1041	struct buf *bp;
1042	struct vnode *vp;
1043	dev_t dev;
1044	int error, want_wakeup;
1045
1046	ump = VFSTOUFS(mp);
1047	dev = ump->um_dev;
1048restart:
1049	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1050		return (0);
1051	}
1052
1053	/*
1054	 * Lock out the creation of new entries in the FFS hash table in
1055	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
1056	 * may occur!
1057	 */
1058	mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
1059	if (ffs_inode_hash_lock) {
1060		while (ffs_inode_hash_lock) {
1061			ffs_inode_hash_lock = -1;
1062			msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
1063		}
1064		mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
1065		goto restart;
1066	}
1067	ffs_inode_hash_lock = 1;
1068	mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
1069
1070	/*
1071	 * If this MALLOC() is performed after the getnewvnode()
1072	 * it might block, leaving a vnode with a NULL v_data to be
1073	 * found by ffs_sync() if a sync happens to fire right then,
1074	 * which will cause a panic because ffs_sync() blindly
1075	 * dereferences vp->v_data (as well it should).
1076	 */
1077	MALLOC(ip, struct inode *, sizeof(struct inode),
1078	    ump->um_malloctype, M_WAITOK);
1079
1080	/* Allocate a new vnode/inode. */
1081	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
1082	if (error) {
1083		/*
1084		 * Do not wake up processes while holding the mutex,
1085		 * otherwise the woken-up processes will immediately
1086		 * block on the mutex again.
1087		 */
1088		mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
1089		want_wakeup = ffs_inode_hash_lock < 0;
1090		ffs_inode_hash_lock = 0;
1091		mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
1092		if (want_wakeup)
1093			wakeup(&ffs_inode_hash_lock);
1094		*vpp = NULL;
1095		FREE(ip, ump->um_malloctype);
1096		return (error);
1097	}
1098	bzero((caddr_t)ip, sizeof(struct inode));
1099	/*
1100	 * FFS supports lock sharing in the stack of vnodes
1101	 */
1102	vp->v_vnlock = &vp->v_lock;
1103	lockinit(vp->v_vnlock, PINOD, "inode", 0, LK_CANRECURSE);
1104	vp->v_data = ip;
1105	ip->i_vnode = vp;
1106	ip->i_fs = fs = ump->um_fs;
1107	ip->i_dev = dev;
1108	ip->i_number = ino;
1109#ifdef QUOTA
1110	{
1111		int i;
1112		for (i = 0; i < MAXQUOTAS; i++)
1113			ip->i_dquot[i] = NODQUOT;
1114	}
1115#endif
1116	/*
1117	 * Put it onto its hash chain and lock it so that other requests for
1118	 * this inode will block if they arrive while we are sleeping waiting
1119	 * for old data structures to be purged or for the contents of the
1120	 * disk portion of this inode to be read.
1121	 */
1122	ufs_ihashins(ip);
1123
1124	/*
1125	 * Do not wake up processes while holding the mutex,
1126	 * otherwise the woken-up processes will immediately
1127	 * block on the mutex again.
1128	 */
1129	mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
1130	want_wakeup = ffs_inode_hash_lock < 0;
1131	ffs_inode_hash_lock = 0;
1132	mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
1133	if (want_wakeup)
1134		wakeup(&ffs_inode_hash_lock);
1135
1136	/* Read in the disk contents for the inode, copy into the inode. */
1137	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1138	    (int)fs->fs_bsize, NOCRED, &bp);
1139	if (error) {
1140		/*
1141		 * The inode does not contain anything useful, so it would
1142		 * be misleading to leave it on its hash chain. With mode
1143		 * still zero, it will be unlinked and returned to the free
1144		 * list by vput().
1145		 */
1146		brelse(bp);
1147		vput(vp);
1148		*vpp = NULL;
1149		return (error);
1150	}
1151	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
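	/*
	 * With soft updates the effective link count is maintained by
	 * the softdep code; otherwise it simply mirrors the on-disk
	 * link count.
	 */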
1152	if (DOINGSOFTDEP(vp))
1153		softdep_load_inodeblock(ip);
1154	else
1155		ip->i_effnlink = ip->i_nlink;
1156	bqrelse(bp);
1157
1158	/*
1159	 * Initialize the vnode from the inode, check for aliases.
1160	 * Note that the underlying vnode may have changed.
1161	 */
1162	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1163	if (error) {
1164		vput(vp);
1165		*vpp = NULL;
1166		return (error);
1167	}
1168	/*
1169	 * Finish inode initialization now that aliasing has been resolved.
1170	 */
1171	ip->i_devvp = ump->um_devvp;
1172	VREF(ip->i_devvp);
1173	/*
1174	 * Set up a generation number for this inode if it does not
1175	 * already have one. This should only happen on old filesystems.
1176	 */
1177	if (ip->i_gen == 0) {
1178		ip->i_gen = random() / 2 + 1;
1179		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
1180			ip->i_flag |= IN_MODIFIED;
1181	}
1182	/*
1183	 * Ensure that uid and gid are correct. This is a temporary
1184	 * fix until fsck has been changed to do the update.
1185	 */
1186	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
1187		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
1188		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
1189	}						/* XXX */
1190
1191	*vpp = vp;
1192	return (0);
1193}
1194
1195/*
1196 * File handle to vnode
1197 *
1198 * Have to be really careful about stale file handles:
1199 * - check that the inode number is valid
1200 * - call ffs_vget() to get the locked inode
1201 * - check for an unallocated inode (i_mode == 0)
1202 * - check that the given client host has export rights and return
1203 *   those rights via. exflagsp and credanonp
1204 */
1205int
1206ffs_fhtovp(mp, fhp, vpp)
1207	register struct mount *mp;
1208	struct fid *fhp;
1209	struct vnode **vpp;
1210{
1211	register struct ufid *ufhp;
1212	struct fs *fs;
1213
1214	ufhp = (struct ufid *)fhp;
1215	fs = VFSTOUFS(mp)->um_fs;
1216	if (ufhp->ufid_ino < ROOTINO ||
1217	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1218		return (ESTALE);
1219	return (ufs_fhtovp(mp, ufhp, vpp));
1220}
1221
1222/*
1223 * Vnode pointer to File handle
1224 */
1225/* ARGSUSED */
1226int
1227ffs_vptofh(vp, fhp)
1228	struct vnode *vp;
1229	struct fid *fhp;
1230{
1231	register struct inode *ip;
1232	register struct ufid *ufhp;
1233
1234	ip = VTOI(vp);
1235	ufhp = (struct ufid *)fhp;
1236	ufhp->ufid_len = sizeof(struct ufid);
1237	ufhp->ufid_ino = ip->i_number;
1238	ufhp->ufid_gen = ip->i_gen;
1239	return (0);
1240}
1241
1242/*
1243 * Initialize the filesystem; set up soft updates and locking, then use ufs_init.
1244 */
1245static int
1246ffs_init(vfsp)
1247	struct vfsconf *vfsp;
1248{
1249
1250	softdep_initialize();
1251	mtx_init(&ffs_inode_hash_mtx, "ifsvgt", MTX_DEF);
1252	return (ufs_init(vfsp));
1253}
1254
1255/*
1256 * Write a superblock and associated information back to disk.
1257 */
1258static int
1259ffs_sbupdate(mp, waitfor)
1260	struct ufsmount *mp;
1261	int waitfor;
1262{
1263	register struct fs *dfs, *fs = mp->um_fs;
1264	register struct buf *bp;
1265	int blks;
1266	caddr_t space;
1267	int i, size, error, allerror = 0;
1268
1269	/*
1270	 * First write back the summary information.
1271	 */
1272	blks = howmany(fs->fs_cssize, fs->fs_fsize);
1273	space = (caddr_t)fs->fs_csp[0];
1274	for (i = 0; i < blks; i += fs->fs_frag) {
1275		size = fs->fs_bsize;
1276		if (i + fs->fs_frag > blks)
1277			size = (blks - i) * fs->fs_fsize;
1278		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1279		    size, 0, 0);
1280		bcopy(space, bp->b_data, (u_int)size);
1281		space += size;
1282		if (waitfor != MNT_WAIT)
1283			bawrite(bp);
1284		else if ((error = bwrite(bp)) != 0)
1285			allerror = error;
1286	}
1287	/*
1288	 * Now write back the superblock itself. If any errors occurred
1289	 * up to this point, then fail so that the superblock avoids
1290	 * being written out as clean.
1291	 */
1292	if (allerror)
1293		return (allerror);
1294	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
1295	fs->fs_fmod = 0;
1296	fs->fs_time = time_second;
1297	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1298	/* Restore compatibility to old file systems.		   XXX */
1299	dfs = (struct fs *)bp->b_data;				/* XXX */
1300	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
1301		dfs->fs_nrpos = -1;				/* XXX */
1302	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
1303		int32_t *lp, tmp;				/* XXX */
1304								/* XXX */
1305		lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
1306		tmp = lp[4];					/* XXX */
1307		for (i = 4; i > 0; i--)				/* XXX */
1308			lp[i] = lp[i-1];			/* XXX */
1309		lp[0] = tmp;					/* XXX */
1310	}							/* XXX */
1311	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */
1312	if (waitfor != MNT_WAIT)
1313		bawrite(bp);
1314	else if ((error = bwrite(bp)) != 0)
1315		allerror = error;
1316	return (allerror);
1317}
1318