ffs_vfsops.c revision 75573
/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 * $FreeBSD: head/sys/ufs/ffs/ffs_vfsops.c 75573 2001-04-17 05:37:51Z mckusick $
 */

#include "opt_quota.h"
#include "opt_ufs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part");

static int	ffs_sbupdate __P((struct ufsmount *, int));
int	ffs_reload __P((struct mount *,struct ucred *,struct proc *));
static int	ffs_oldfscompat __P((struct fs *));
static int	ffs_init __P((struct vfsconf *));

static struct vfsops ufs_vfsops = {
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ufs_check_export,
	ffs_vptofh,
	ffs_init,
	vfs_stduninit,
#ifdef UFS_EXTATTR
	ufs_extattrctl,
#else
	vfs_stdextattrctl,
#endif
};

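/*
 * Register these operations with the VFS layer as the "ufs" filesystem type.
 */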
VFS_SET(ufs_vfsops, ufs, 0);

/*
 * ffs_mount
 *
 * Called when mounting local physical media
 *
 * PARAMETERS:
 *		mountroot
 *			mp	mount point structure
 *			path	NULL (flag for root mount!!!)
 *			data	<unused>
 *			ndp	<unused>
 *			p	process (user credentials check [statfs])
 *
 *		mount
 *			mp	mount point structure
 *			path	path to mount point
 *			data	pointer to argument struct in user space
 *			ndp	mount point namei() return (used for
 *				credentials on reload), reused to look
 *				up block device.
 *			p	process (user credentials check)
 *
 * RETURNS:	0	Success
 *		!0	error number (errno.h)
 *
 * LOCK STATE:
 *
 *		ENTRY
 *			mount point is locked
 *		EXIT
 *			mount point is locked
 *
 * NOTES:
 *		A NULL path can be used for a flag since the mount
 *		system call will fail with EFAULT in copyinstr in
 *		namei() if it is a genuine NULL from the user.
 */
int
ffs_mount(mp, path, data, ndp, p)
        struct mount		*mp;	/* mount struct pointer*/
        char			*path;	/* path to mount point*/
        caddr_t			data;	/* arguments to FS specific mount*/
        struct nameidata	*ndp;	/* mount point credentials*/
        struct proc		*p;	/* process requesting mount*/
{
	size_t		size;
	struct vnode	*devvp;
	struct ufs_args args;
	struct ufsmount *ump = 0;
	register struct fs *fs;
	int error, flags;
	mode_t accessmode;

	/*
	 * Use NULL path to indicate we are mounting the root file system.
	 */
	if (path == NULL) {
		if ((error = bdevvp(rootdev, &rootvp))) {
			printf("ffs_mountroot: can't find rootvp\n");
			return (error);
		}

		if ((error = ffs_mountfs(rootvp, mp, p, M_FFSNODE)) != 0)
			return (error);

		(void)VFS_STATFS(mp, &mp->mnt_stat, p);
		return (0);
	}

	/*
	 * Mounting non-root file system or updating a file system
	 */
	if ((error = copyin(data, (caddr_t)&args, sizeof(struct ufs_args)))!= 0)
		return (error);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP) {
				error = softdep_flushfiles(mp, flags, p);
			} else {
				error = ffs_flushfiles(mp, flags, p);
			}
			if (error) {
				vn_finished_write(mp);
				return (error);
			}
			fs->fs_ronly = 1;
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				vn_finished_write(mp);
				return (error);
			}
			vn_finished_write(mp);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)) != 0)
			return (error);
		if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (p->p_ucred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
				if ((error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p)) != 0) {
					VOP_UNLOCK(devvp, 0, p);
					return (error);
				}
				VOP_UNLOCK(devvp, 0, p);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not %s\n",
					   fs->fs_fsmnt, "properly dismounted");
				} else {
					printf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
					    fs->fs_fsmnt);
					return (EPERM);
				}
			}
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			fs->fs_ronly = 0;
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
				vn_finished_write(mp);
				return (error);
			}
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, p->p_ucred))){
				vn_finished_write(mp);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vn_finished_write(mp);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (mp->mnt_flag & MNT_SOFTDEP)
			mp->mnt_flag &= ~MNT_ASYNC;
		/*
		 * If not updating name, process export requests.
		 */
		if (args.fspec == 0)
			return (vfs_export(mp, &ump->um_export, &args.export));
		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, args.fspec));
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	NDFREE(ndp, NDF_ONLY_PNBUF);
	devvp = ndp->ni_vp;
	if (!vn_isdisk(devvp, &error)) {
		vrele(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
		if ((error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p))!= 0){
			vput(devvp);
			return (error);
		}
		VOP_UNLOCK(devvp, 0, p);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */

		if (devvp != ump->um_devvp &&
		    devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vrele(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, p, M_FFSNODE)) != 0) {
			vrele(devvp);
			return (error);
		}
	}
	/*
	 * Save "mounted from" device name info for mount point (NULL pad).
	 */
	copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	/*
	 * Initialize filesystem stat information in mount struct.
	 */
	(void)VFS_STATFS(mp, &mp->mnt_stat, p);
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mp, cred, p)
	register struct mount *mp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	dev_t dev;
	int i, blks, size, error;
	int32_t *lp;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		panic("ffs_reload: dirty1");

	dev = devvp->v_rdev;

	/*
	 * Only VMIO the backing device if the backing device is a real
	 * block device.  See ffs_mountmfs() for more details.
	 */
	if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
		vfs_object_create(devvp, p, p->p_ucred);
		mtx_lock(&devvp->v_interlock);
		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
	}

	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	if ((error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
		newfs->fs_bsize < sizeof(struct fs)) {
			brelse(bp);
			return (EIO);		/* XXX needs translation */
	}
	fs = VFSTOUFS(mp)->um_fs;
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

loop:
	mtx_lock(&mntvnode_mtx);
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp) {
			mtx_unlock(&mntvnode_mtx);
			goto loop;
		}
		nvp = LIST_NEXT(vp, v_mntvnodes);
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_mtx, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		mtx_lock(&vp->v_interlock);
		mtx_unlock(&mntvnode_mtx);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			goto loop;
		}
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			return (error);
		}
		ip->i_din = *((struct dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number));
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
		mtx_lock(&mntvnode_mtx);
	}
	mtx_unlock(&mntvnode_mtx);
	return (0);
}

#include <sys/sysctl.h>
int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
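/*
 * Debug knob: while debug.bigcgs is set, ffs_mountfs() saves the real
 * fs_cgsize in fs_sparecon[0] and substitutes fs_bsize; ffs_unmount()
 * puts the saved value back.
 */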

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p, malloctype)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
	struct malloc_type *malloctype;
{
	register struct ufsmount *ump;
	struct buf *bp;
	register struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	void *space;
	int error, i, blks, size, ronly;
	int32_t *lp;
	struct ucred *cred;
	u_int64_t maxfilesize;					/* XXX */
	size_t strsize;
	int ncount;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	error = vfs_mountedon(devvp);
	if (error)
		return (error);
	ncount = vcount(devvp);

	if (ncount > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	/*
	 * Only VMIO the backing device if the backing device is a real
	 * block device.  This excludes the original MFS implementation.
	 * Note that it is optional that the backing device be VMIOed.  This
	 * increases the opportunity for metadata caching.
	 */
	if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
		vfs_object_create(devvp, p, cred);
		mtx_lock(&devvp->v_interlock);
		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
	}

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);
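	/*
	 * Bound the per-transfer I/O size by what the underlying device
	 * advertises, but never more than MAXPHYS.
	 */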
	if (devvp->v_rdev->si_iosize_max > mp->mnt_iosize_max)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	if ((error = bread(devvp, SBLOCK, SBSIZE, cred, &bp)) != 0)
		goto out;
	fs = (struct fs *)bp->b_data;
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	fs->fs_fmod = 0;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
		     (fs->fs_flags & FS_DOSOFTDEP))) {
			printf(
"WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			printf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
			    fs->fs_fsmnt);
			error = EPERM;
			goto out;
		}
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;          /* needs translation */
		goto out;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_malloctype = malloctype;
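	/* FFS maintains i_effnlink, so tell the UFS layer it can rely on it. */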
	ump->um_i_effnlink_valid = 1;
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
	    M_WAITOK);
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	fs->fs_ronly = ronly;
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp)) != 0) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	space = (u_int8_t *)space + size;
	bzero(fs->fs_contigdirs, size);
	/* Compatibility for old filesystems 	   XXX */
	if (fs->fs_avgfilesize <= 0)		/* XXX */
		fs->fs_avgfilesize = AVFILESIZ;	/* XXX */
	if (fs->fs_avgfpdir <= 0)		/* XXX */
		fs->fs_avgfpdir = AFPDIR;	/* XXX */
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    vfs_getvfs(&mp->mnt_stat.f_fsid))
		vfs_getnewfsid(mp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	devvp->v_rdev->si_mountpoint = mp;
	ffs_oldfscompat(fs);

	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	copystr(	mp->mnt_stat.f_mntonname,	/* mount point*/
			fs->fs_fsmnt,			/* copy area*/
			sizeof(fs->fs_fsmnt) - 1,	/* max size*/
			&strsize);			/* real size*/
	bzero( fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);

	if( mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (bigcgs) {
		if (fs->fs_sparecon[0] <= 0)
			fs->fs_sparecon[0] = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
	if (ronly == 0) {
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 *
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 * 	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the file system was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, p);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	return (0);
out:
	devvp->v_rdev->si_mountpoint = NULL;
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	if (ump) {
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
static int
ffs_oldfscompat(fs)
	struct fs *fs;
{

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
#if 0
		int i;						/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
#endif
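		/*
		 * Rather than computing the exact limit (as the disabled
		 * code above would), cap the old-format maximum file size
		 * at 2^39 bytes.
		 */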
		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump = VFSTOUFS(mp);
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
	}
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, p)))
		if (error != EOPNOTSUPP)
			printf("ffs_unmount: ufs_extattr_stop returned %d\n",
			    error);
	ufs_extattr_uepm_destroy(&ump->um_extattr);
#endif
	if (mp->mnt_flag & MNT_SOFTDEP) {
		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
			return (error);
	} else {
		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
			return (error);
	}
	fs = ump->um_fs;
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_sparecon[0];
		fs->fs_sparecon[0] = 0;
	}
	if (fs->fs_ronly == 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT);
		if (error) {
			fs->fs_clean = 0;
			return (error);
		}
	}
	ump->um_devvp->v_rdev->si_mountpoint = NULL;

	vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
		NOCRED, p);

	vrele(ump->um_devvp);

	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	register struct ufsmount *ump;
	int error;

	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, NULLVP, SKIPSYSTEM|flags);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
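	/*
	 * A device vnode marked VCOPYONWRITE means snapshots are active;
	 * flush and unmount them before the final vflush below.
	 */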
	if (ump->um_devvp->v_flag & VCOPYONWRITE) {
		if ((error = vflush(mp, NULL, SKIPSYSTEM | flags)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
        /*
	 * Flush all the files.
	 */
	if ((error = vflush(mp, NULL, flags)) != 0)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p);
	VOP_UNLOCK(ump->um_devvp, 0, p);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
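	/*
	 * f_bsize is the fragment size (the allocation unit reported to
	 * userland); f_iosize is the optimal I/O size, i.e. the block size.
	 */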
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
	sbp->f_bavail = freespace(fs, fs->fs_minfree);
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *nvp, *vp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, wait, lockreq, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("ffs_sync: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	wait = 0;
	lockreq = LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK;
	if (waitfor == MNT_WAIT) {
		wait = 1;
		lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	}
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		mtx_lock(&vp->v_interlock);
		nvp = LIST_NEXT(vp, v_mntvnodes);
		ip = VTOI(vp);
		if (vp->v_type == VNON || ((ip->i_flag &
		     (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		     TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
			mtx_unlock(&vp->v_interlock);
			continue;
		}
		if (vp->v_type != VCHR) {
			mtx_unlock(&mntvnode_mtx);
			if ((error = vget(vp, lockreq, p)) != 0) {
				mtx_lock(&mntvnode_mtx);
				if (error == ENOENT)
					goto loop;
				continue;
			}
			if ((error = VOP_FSYNC(vp, cred, waitfor, p)) != 0)
				allerror = error;
			VOP_UNLOCK(vp, 0, p);
			vrele(vp);
			mtx_lock(&mntvnode_mtx);
		} else {
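			/*
			 * Device special vnodes: just push the inode itself;
			 * their data buffers belong to the device, not to
			 * this filesystem.
			 */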
			mtx_unlock(&mntvnode_mtx);
			mtx_unlock(&vp->v_interlock);
			UFS_UPDATE(vp, wait);
			mtx_lock(&mntvnode_mtx);
		}
	}
	mtx_unlock(&mntvnode_mtx);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor == MNT_WAIT) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
			allerror = error;
		/* Flushed work items may create new vnodes to clean */
		if (count) {
			mtx_lock(&mntvnode_mtx);
			goto loop;
		}
	}
#ifdef QUOTA
	qsync(mp);
#endif
	if (waitfor != MNT_LAZY) {
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
		if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0, p);
	}
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
static int ffs_inode_hash_lock;
/*
 * ffs_inode_hash_lock is a variable to manage mutual exclusion
 * of vnode allocation and insertion into the hash, especially to
 * avoid holding more than one vnode for the same inode in the
 * hash table. ffs_inode_hash_lock must hence be tested-and-set
 * or cleared atomically, accomplished by ffs_inode_hash_mtx.
 *
 * As vnode allocation may block during MALLOC() and zone
 * allocation, we should also do msleep() to give away the CPU
 * if anyone else is allocating a vnode. lockmgr is not suitable
 * here because someone else may insert to the hash table the
 * vnode we are trying to allocate during our sleep, in which
 * case the hash table needs to be examined once again after
 * waking up.
 */
static struct mtx ffs_inode_hash_mtx;

int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error, want_wakeup;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
restart:
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
		return (0);
	}

	/*
	 * Lock out the creation of new entries in the FFS hash table in
	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
	 * may occur!
	 */
	mtx_lock(&ffs_inode_hash_mtx);
	if (ffs_inode_hash_lock) {
		while (ffs_inode_hash_lock) {
			ffs_inode_hash_lock = -1;
			msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
		}
		mtx_unlock(&ffs_inode_hash_mtx);
		goto restart;
	}
	ffs_inode_hash_lock = 1;
	mtx_unlock(&ffs_inode_hash_mtx);

	/*
	 * If this MALLOC() is performed after the getnewvnode()
	 * it might block, leaving a vnode with a NULL v_data to be
	 * found by ffs_sync() if a sync happens to fire right then,
	 * which will cause a panic because ffs_sync() blindly
	 * dereferences vp->v_data (as well it should).
	 */
	MALLOC(ip, struct inode *, sizeof(struct inode),
	    ump->um_malloctype, M_WAITOK);

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
	if (error) {
		/*
		 * Do not wake up processes while holding the mutex,
		 * otherwise the woken-up processes would immediately
		 * block on the mutex again.
		 */
		mtx_lock(&ffs_inode_hash_mtx);
		want_wakeup = ffs_inode_hash_lock < 0;
		ffs_inode_hash_lock = 0;
		mtx_unlock(&ffs_inode_hash_mtx);
		if (want_wakeup)
			wakeup(&ffs_inode_hash_lock);
		*vpp = NULL;
		FREE(ip, ump->um_malloctype);
		return (error);
	}
	bzero((caddr_t)ip, sizeof(struct inode));
	/*
	 * FFS supports lock sharing in the stack of vnodes
	 */
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PINOD, "inode", 0, LK_CANRECURSE);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);

	/*
	 * Do not wake up processes while holding the mutex,
	 * otherwise the woken-up processes would immediately
	 * block on the mutex again.
	 */
	mtx_lock(&ffs_inode_hash_mtx);
	want_wakeup = ffs_inode_hash_lock < 0;
	ffs_inode_hash_lock = 0;
	mtx_unlock(&ffs_inode_hash_mtx);
	if (want_wakeup)
		wakeup(&ffs_inode_hash_lock);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		ip->i_gen = random() / 2 + 1;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
	}						/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_fhtovp(mp, ufhp, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}

/*
 * Initialize the filesystem: start soft updates, set up the inode hash
 * lock, then hand the rest to ufs_init.
 */
static int
ffs_init(vfsp)
	struct vfsconf *vfsp;
{

	softdep_initialize();
	mtx_init(&ffs_inode_hash_mtx, "ifsvgt", MTX_DEF);
	return (ufs_init(vfsp));
}

/*
 * Write a superblock and associated information back to disk.
 */
static int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *dfs, *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	void *space;
	int i, size, error, allerror = 0;

	/*
	 * First write back the summary information.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
		bcopy(space, bp->b_data, (u_int)size);
		space = (char *)space + size;
		if (waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			allerror = error;
	}
	/*
	 * Now write back the superblock itself. If any errors occurred
	 * up to this point, then fail so that the superblock avoids
	 * being written out as clean.
	 */
	if (allerror)
		return (allerror);
	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
	fs->fs_fmod = 0;
	fs->fs_time = time_second;
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	/* Restore compatibility to old file systems.		   XXX */
	dfs = (struct fs *)bp->b_data;				/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		dfs->fs_nrpos = -1;				/* XXX */
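	/*
	 * The rotation below presumably puts the words following fs_qbmask
	 * back at the offsets the pre-4.4 inode format used on disk; only
	 * the in-buffer copy (dfs) is touched, never the incore superblock.
	 */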
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
		lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */
	if (waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		allerror = error;
	return (allerror);
}
