ffs_vfsops.c revision 78940
1/*
2 * Copyright (c) 1989, 1991, 1993, 1994
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by the University of
16 *	California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
34 * $FreeBSD: head/sys/ufs/ffs/ffs_vfsops.c 78940 2001-06-28 22:21:33Z jhb $
35 */
36
37#include "opt_quota.h"
38#include "opt_ufs.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/namei.h>
43#include <sys/proc.h>
44#include <sys/kernel.h>
45#include <sys/vnode.h>
46#include <sys/mount.h>
47#include <sys/bio.h>
48#include <sys/buf.h>
49#include <sys/conf.h>
50#include <sys/fcntl.h>
51#include <sys/disklabel.h>
52#include <sys/malloc.h>
53#include <sys/mutex.h>
54
55#include <ufs/ufs/extattr.h>
56#include <ufs/ufs/quota.h>
57#include <ufs/ufs/ufsmount.h>
58#include <ufs/ufs/inode.h>
59#include <ufs/ufs/ufs_extern.h>
60
61#include <ufs/ffs/fs.h>
62#include <ufs/ffs/ffs_extern.h>
63
64#include <vm/vm.h>
65#include <vm/vm_page.h>
66
67static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part");
68
69static int	ffs_sbupdate __P((struct ufsmount *, int));
70int	ffs_reload __P((struct mount *,struct ucred *,struct proc *));
71static int	ffs_oldfscompat __P((struct fs *));
72static int	ffs_init __P((struct vfsconf *));
73
74static struct vfsops ufs_vfsops = {
75	ffs_mount,
76	ufs_start,
77	ffs_unmount,
78	ufs_root,
79	ufs_quotactl,
80	ffs_statfs,
81	ffs_sync,
82	ffs_vget,
83	ffs_fhtovp,
84	vfs_stdcheckexp,
85	ffs_vptofh,
86	ffs_init,
87	vfs_stduninit,
88#ifdef UFS_EXTATTR
89	ufs_extattrctl,
90#else
91	vfs_stdextattrctl,
92#endif
93};
94
95VFS_SET(ufs_vfsops, ufs, 0);
96
97/*
98 * ffs_mount
99 *
100 * Called when mounting local physical media
101 *
102 * PARAMETERS:
103 *		mountroot
104 *			mp	mount point structure
105 *			path	NULL (flag for root mount!!!)
106 *			data	<unused>
107 *			ndp	<unused>
108 *			p	process (user credentials check [statfs])
109 *
110 *		mount
111 *			mp	mount point structure
112 *			path	path to mount point
113 *			data	pointer to argument struct in user space
114 *			ndp	mount point namei() return (used for
115 *				credentials on reload), reused to look
116 *				up block device.
117 *			p	process (user credentials check)
118 *
119 * RETURNS:	0	Success
120 *		!0	error number (errno.h)
121 *
122 * LOCK STATE:
123 *
124 *		ENTRY
125 *			mount point is locked
126 *		EXIT
127 *			mount point is locked
128 *
129 * NOTES:
130 *		A NULL path can be used for a flag since the mount
131 *		system call will fail with EFAULT in copyinstr in
132 *		namei() if it is a genuine NULL from the user.
133 */
134int
135ffs_mount(mp, path, data, ndp, p)
136        struct mount		*mp;	/* mount struct pointer*/
137        char			*path;	/* path to mount point*/
138        caddr_t			data;	/* arguments to FS specific mount*/
139        struct nameidata	*ndp;	/* mount point credentials*/
140        struct proc		*p;	/* process requesting mount*/
141{
142	size_t		size;
143	struct vnode	*devvp;
144	struct ufs_args args;
145	struct ufsmount *ump = 0;
146	register struct fs *fs;
147	int error, flags;
148	mode_t accessmode;
149
150	/*
151	 * Use NULL path to indicate we are mounting the root file system.
152	 */
153	if (path == NULL) {
154		if ((error = bdevvp(rootdev, &rootvp))) {
155			printf("ffs_mountroot: can't find rootvp\n");
156			return (error);
157		}
158
159		if ((error = ffs_mountfs(rootvp, mp, p, M_FFSNODE)) != 0)
160			return (error);
161
162		(void)VFS_STATFS(mp, &mp->mnt_stat, p);
163		return (0);
164	}
165
166	/*
167	 * Mounting non-root file system or updating a file system
168	 */
169	if ((error = copyin(data, (caddr_t)&args, sizeof(struct ufs_args)))!= 0)
170		return (error);
171
172	/*
173	 * If updating, check whether changing from read-only to
174	 * read/write; if there is no device name, that's all we do.
175	 */
176	if (mp->mnt_flag & MNT_UPDATE) {
177		ump = VFSTOUFS(mp);
178		fs = ump->um_fs;
179		devvp = ump->um_devvp;
180		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
181			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
182				return (error);
183			flags = WRITECLOSE;
184			if (mp->mnt_flag & MNT_FORCE)
185				flags |= FORCECLOSE;
186			if (mp->mnt_flag & MNT_SOFTDEP) {
187				error = softdep_flushfiles(mp, flags, p);
188			} else {
189				error = ffs_flushfiles(mp, flags, p);
190			}
191			if (error) {
192				vn_finished_write(mp);
193				return (error);
194			}
195			if (fs->fs_pendingblocks != 0 ||
196			    fs->fs_pendinginodes != 0) {
197				printf("%s: update error: blocks %d files %d\n",
198				    fs->fs_fsmnt, fs->fs_pendingblocks,
199				    fs->fs_pendinginodes);
200				fs->fs_pendingblocks = 0;
201				fs->fs_pendinginodes = 0;
202			}
203			fs->fs_ronly = 1;
204			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
205				fs->fs_clean = 1;
206			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
207				fs->fs_ronly = 0;
208				fs->fs_clean = 0;
209				vn_finished_write(mp);
210				return (error);
211			}
212			vn_finished_write(mp);
213		}
214		if ((mp->mnt_flag & MNT_RELOAD) &&
215		    (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)) != 0)
216			return (error);
217		if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
218			/*
219			 * If upgrade to read-write by non-root, then verify
220			 * that user has necessary permissions on the device.
221			 */
222			if (p->p_ucred->cr_uid != 0) {
223				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
224				if ((error = VOP_ACCESS(devvp, VREAD | VWRITE,
225				    p->p_ucred, p)) != 0) {
226					VOP_UNLOCK(devvp, 0, p);
227					return (error);
228				}
229				VOP_UNLOCK(devvp, 0, p);
230			}
231			fs->fs_flags &= ~FS_UNCLEAN;
232			if (fs->fs_clean == 0) {
233				fs->fs_flags |= FS_UNCLEAN;
234				if ((mp->mnt_flag & MNT_FORCE) ||
235				    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
236				     (fs->fs_flags & FS_DOSOFTDEP))) {
237					printf("WARNING: %s was not %s\n",
238					   fs->fs_fsmnt, "properly dismounted");
239				} else {
240					printf(
241"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
242					    fs->fs_fsmnt);
243					return (EPERM);
244				}
245			}
246			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
247				return (error);
248			fs->fs_ronly = 0;
249			fs->fs_clean = 0;
250			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
251				vn_finished_write(mp);
252				return (error);
253			}
254			/* check to see if we need to start softdep */
255			if ((fs->fs_flags & FS_DOSOFTDEP) &&
256			    (error = softdep_mount(devvp, mp, fs, p->p_ucred))){
257				vn_finished_write(mp);
258				return (error);
259			}
260			if (fs->fs_snapinum[0] != 0)
261				ffs_snapshot_mount(mp);
262			vn_finished_write(mp);
263		}
264		/*
265		 * Soft updates is incompatible with "async",
266		 * so if we are doing softupdates stop the user
267		 * from setting the async flag in an update.
268		 * Softdep_mount() clears it in an initial mount
269		 * or ro->rw remount.
270		 */
271		if (mp->mnt_flag & MNT_SOFTDEP)
272			mp->mnt_flag &= ~MNT_ASYNC;
273		/*
274		 * If not updating name, process export requests.
275		 */
276		if (args.fspec == 0)
277			return (vfs_export(mp, &args.export));
278		/*
279		 * If this is a snapshot request, take the snapshot.
280		 */
281		if (mp->mnt_flag & MNT_SNAPSHOT)
282			return (ffs_snapshot(mp, args.fspec));
283	}
284
285	/*
286	 * Not an update, or updating the name: look up the name
287	 * and verify that it refers to a sensible block device.
288	 */
289	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
290	if ((error = namei(ndp)) != 0)
291		return (error);
292	NDFREE(ndp, NDF_ONLY_PNBUF);
293	devvp = ndp->ni_vp;
294	if (!vn_isdisk(devvp, &error)) {
295		vrele(devvp);
296		return (error);
297	}
298
299	/*
300	 * If mount by non-root, then verify that user has necessary
301	 * permissions on the device.
302	 */
303	if (p->p_ucred->cr_uid != 0) {
304		accessmode = VREAD;
305		if ((mp->mnt_flag & MNT_RDONLY) == 0)
306			accessmode |= VWRITE;
307		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
308		if ((error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p))!= 0){
309			vput(devvp);
310			return (error);
311		}
312		VOP_UNLOCK(devvp, 0, p);
313	}
314
315	if (mp->mnt_flag & MNT_UPDATE) {
316		/*
317		 * Update only
318		 *
319		 * If it's not the same vnode, or at least the same device,
320		 * then it's not correct.
321		 */
322
323		if (devvp != ump->um_devvp &&
324		    devvp->v_rdev != ump->um_devvp->v_rdev)
325			error = EINVAL;	/* needs translation */
326		vrele(devvp);
327		if (error)
328			return (error);
329	} else {
330		/*
331		 * New mount
332		 *
333		 * We need the name for the mount point (also used for
334		 * "last mounted on") copied in. If an error occurs,
335		 * the mount point is discarded by the upper level code.
336		 * Note that vfs_mount() populates f_mntonname for us.
337		 */
338		if ((error = ffs_mountfs(devvp, mp, p, M_FFSNODE)) != 0) {
339			vrele(devvp);
340			return (error);
341		}
342	}
343	/*
344	 * Save "mounted from" device name info for mount point (NULL pad).
345	 */
346	copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
347	bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
348	/*
349	 * Initialize filesystem stat information in mount struct.
350	 */
351	(void)VFS_STATFS(mp, &mp->mnt_stat, p);
352	return (0);
353}
354
355/*
356 * Reload all incore data for a filesystem (used after running fsck on
357 * the root filesystem and finding things to fix). The filesystem must
358 * be mounted read-only.
359 *
360 * Things to do to update the mount:
361 *	1) invalidate all cached meta-data.
362 *	2) re-read superblock from disk.
363 *	3) re-read summary information from disk.
364 *	4) invalidate all inactive vnodes.
365 *	5) invalidate all cached file data.
366 *	6) re-read inode data for all active vnodes.
367 */
368int
369ffs_reload(mp, cred, p)
370	register struct mount *mp;
371	struct ucred *cred;
372	struct proc *p;
373{
374	register struct vnode *vp, *nvp, *devvp;
375	struct inode *ip;
376	void *space;
377	struct buf *bp;
378	struct fs *fs, *newfs;
379	struct partinfo dpart;
380	dev_t dev;
381	int i, blks, size, error;
382	int32_t *lp;
383
384	if ((mp->mnt_flag & MNT_RDONLY) == 0)
385		return (EINVAL);
386	/*
387	 * Step 1: invalidate all cached meta-data.
388	 */
389	devvp = VFSTOUFS(mp)->um_devvp;
390	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
391	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
392	VOP_UNLOCK(devvp, 0, p);
393	if (error)
394		panic("ffs_reload: dirty1");
395
396	dev = devvp->v_rdev;
397
398	/*
399	 * Only VMIO the backing device if the backing device is a real
400	 * block device.
401	 */
402	if (vn_isdisk(devvp, NULL)) {
403		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
404		vfs_object_create(devvp, p, p->p_ucred);
405		mtx_lock(&devvp->v_interlock);
406		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
407	}
408
409	/*
410	 * Step 2: re-read superblock from disk.
411	 */
412	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
413		size = DEV_BSIZE;
414	else
415		size = dpart.disklab->d_secsize;
416	if ((error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) != 0)
417		return (error);
418	newfs = (struct fs *)bp->b_data;
419	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
420		newfs->fs_bsize < sizeof(struct fs)) {
421			brelse(bp);
422			return (EIO);		/* XXX needs translation */
423	}
424	fs = VFSTOUFS(mp)->um_fs;
425	/*
426	 * Copy pointer fields back into superblock before copying in	XXX
427	 * new superblock. These should really be in the ufsmount.	XXX
428	 * Note that important parameters (eg fs_ncg) are unchanged.
429	 */
430	newfs->fs_csp = fs->fs_csp;
431	newfs->fs_maxcluster = fs->fs_maxcluster;
432	newfs->fs_contigdirs = fs->fs_contigdirs;
433	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
434	if (fs->fs_sbsize < SBSIZE)
435		bp->b_flags |= B_INVAL | B_NOCACHE;
436	brelse(bp);
437	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
438	ffs_oldfscompat(fs);
439	/* An old fsck may have zeroed these fields, so recheck them. */
440	if (fs->fs_avgfilesize <= 0)		/* XXX */
441		fs->fs_avgfilesize = AVFILESIZ;	/* XXX */
442	if (fs->fs_avgfpdir <= 0)		/* XXX */
443		fs->fs_avgfpdir = AFPDIR;	/* XXX */
444	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
445		printf("%s: reload pending error: blocks %d files %d\n",
446		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
447		fs->fs_pendingblocks = 0;
448		fs->fs_pendinginodes = 0;
449	}
450
451	/*
452	 * Step 3: re-read summary information from disk.
453	 */
454	blks = howmany(fs->fs_cssize, fs->fs_fsize);
455	space = fs->fs_csp;
456	for (i = 0; i < blks; i += fs->fs_frag) {
457		size = fs->fs_bsize;
458		if (i + fs->fs_frag > blks)
459			size = (blks - i) * fs->fs_fsize;
460		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
461		    NOCRED, &bp);
462		if (error)
463			return (error);
464		bcopy(bp->b_data, space, (u_int)size);
465		space = (char *)space + size;
466		brelse(bp);
467	}
468	/*
469	 * We no longer know anything about clusters per cylinder group.
470	 */
471	if (fs->fs_contigsumsize > 0) {
472		lp = fs->fs_maxcluster;
473		for (i = 0; i < fs->fs_ncg; i++)
474			*lp++ = fs->fs_contigsumsize;
475	}
476
477loop:
478	mtx_lock(&mntvnode_mtx);
479	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
480		if (vp->v_mount != mp) {
481			mtx_unlock(&mntvnode_mtx);
482			goto loop;
483		}
484		nvp = LIST_NEXT(vp, v_mntvnodes);
485		mtx_unlock(&mntvnode_mtx);
486		/*
487		 * Step 4: invalidate all inactive vnodes.
488		 */
489		if (vrecycle(vp, NULL, p))
490			goto loop;
491		/*
492		 * Step 5: invalidate all cached file data.
493		 */
494		mtx_lock(&vp->v_interlock);
495		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
496			goto loop;
497		}
498		if (vinvalbuf(vp, 0, cred, p, 0, 0))
499			panic("ffs_reload: dirty2");
500		/*
501		 * Step 6: re-read inode data for all active vnodes.
502		 */
503		ip = VTOI(vp);
504		error =
505		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
506		    (int)fs->fs_bsize, NOCRED, &bp);
507		if (error) {
508			vput(vp);
509			return (error);
510		}
511		ip->i_din = *((struct dinode *)bp->b_data +
512		    ino_to_fsbo(fs, ip->i_number));
513		ip->i_effnlink = ip->i_nlink;
514		brelse(bp);
515		vput(vp);
516		mtx_lock(&mntvnode_mtx);
517	}
518	mtx_unlock(&mntvnode_mtx);
519	return (0);
520}
521
522#include <sys/sysctl.h>
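/*
 * debug.bigcgs is a debugging knob: when set, ffs_mountfs() below bumps
 * the in-core fs_cgsize up to a full fs_bsize (stashing the original
 * value in fs_sparecon[0]) and ffs_unmount() restores it.  Presumably a
 * workaround for filesystems whose cylinder group blocks overflow the
 * recorded fs_cgsize.
 */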
523int bigcgs = 0;
524SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
525
526/*
527 * Common code for mount and mountroot
528 */
529int
530ffs_mountfs(devvp, mp, p, malloctype)
531	register struct vnode *devvp;
532	struct mount *mp;
533	struct proc *p;
534	struct malloc_type *malloctype;
535{
536	register struct ufsmount *ump;
537	struct buf *bp;
538	register struct fs *fs;
539	dev_t dev;
540	struct partinfo dpart;
541	void *space;
542	int error, i, blks, size, ronly;
543	int32_t *lp;
544	struct ucred *cred;
545	u_int64_t maxfilesize;					/* XXX */
546	size_t strsize;
547	int ncount;
548
549	dev = devvp->v_rdev;
550	cred = p ? p->p_ucred : NOCRED;
551	/*
552	 * Disallow multiple mounts of the same device.
553	 * Disallow mounting of a device that is currently in use
554	 * (except for root, which might share swap device for miniroot).
555	 * Flush out any old buffers remaining from a previous use.
556	 */
557	error = vfs_mountedon(devvp);
558	if (error)
559		return (error);
560	ncount = vcount(devvp);
561
562	if (ncount > 1 && devvp != rootvp)
563		return (EBUSY);
564	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
565	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
566	VOP_UNLOCK(devvp, 0, p);
567	if (error)
568		return (error);
569
570	/*
571	 * Only VMIO the backing device if the backing device is a real
572	 * block device.
573	 * Note that it is optional that the backing device be VMIOed.  This
574	 * increases the opportunity for metadata caching.
575	 */
576	if (vn_isdisk(devvp, NULL)) {
577		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
578		vfs_object_create(devvp, p, cred);
579		mtx_lock(&devvp->v_interlock);
580		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
581	}
582
583	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
584	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
585	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
586	VOP_UNLOCK(devvp, 0, p);
587	if (error)
588		return (error);
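	/*
	 * Grow this mount's notion of the largest I/O size to what the
	 * underlying device advertises, but never beyond MAXPHYS.
	 */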
589	if (devvp->v_rdev->si_iosize_max > mp->mnt_iosize_max)
590		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
591	if (mp->mnt_iosize_max > MAXPHYS)
592		mp->mnt_iosize_max = MAXPHYS;
593
594	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
595		size = DEV_BSIZE;
596	else
597		size = dpart.disklab->d_secsize;
598
599	bp = NULL;
600	ump = NULL;
601	if ((error = bread(devvp, SBLOCK, SBSIZE, cred, &bp)) != 0)
602		goto out;
603	fs = (struct fs *)bp->b_data;
604	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
605	    fs->fs_bsize < sizeof(struct fs)) {
606		error = EINVAL;		/* XXX needs translation */
607		goto out;
608	}
609	fs->fs_fmod = 0;
610	fs->fs_flags &= ~FS_UNCLEAN;
611	if (fs->fs_clean == 0) {
612		fs->fs_flags |= FS_UNCLEAN;
613		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
614		    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
615		     (fs->fs_flags & FS_DOSOFTDEP))) {
616			printf(
617"WARNING: %s was not properly dismounted\n",
618			    fs->fs_fsmnt);
619		} else {
620			printf(
621"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
622			    fs->fs_fsmnt);
623			error = EPERM;
624			goto out;
625		}
626		if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
627			printf("%s: lost blocks %d files %d\n", fs->fs_fsmnt,
628			    fs->fs_pendingblocks, fs->fs_pendinginodes);
629			fs->fs_pendingblocks = 0;
630			fs->fs_pendinginodes = 0;
631		}
632	}
633	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
634		printf("%s: mount pending error: blocks %d files %d\n",
635		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
636		fs->fs_pendingblocks = 0;
637		fs->fs_pendinginodes = 0;
638	}
639	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
640	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
641		error = EROFS;          /* needs translation */
642		goto out;
643	}
644	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
645	ump->um_malloctype = malloctype;
646	ump->um_i_effnlink_valid = 1;
647	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
648	    M_WAITOK);
649	ump->um_blkatoff = ffs_blkatoff;
650	ump->um_truncate = ffs_truncate;
651	ump->um_update = ffs_update;
652	ump->um_valloc = ffs_valloc;
653	ump->um_vfree = ffs_vfree;
654	ump->um_balloc = ffs_balloc;
655	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
656	if (fs->fs_sbsize < SBSIZE)
657		bp->b_flags |= B_INVAL | B_NOCACHE;
658	brelse(bp);
659	bp = NULL;
660	fs = ump->um_fs;
661	fs->fs_ronly = ronly;
662	size = fs->fs_cssize;
663	blks = howmany(size, fs->fs_fsize);
664	if (fs->fs_contigsumsize > 0)
665		size += fs->fs_ncg * sizeof(int32_t);
666	size += fs->fs_ncg * sizeof(u_int8_t);
667	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
668	fs->fs_csp = space;
669	for (i = 0; i < blks; i += fs->fs_frag) {
670		size = fs->fs_bsize;
671		if (i + fs->fs_frag > blks)
672			size = (blks - i) * fs->fs_fsize;
673		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
674		    cred, &bp)) != 0) {
675			free(fs->fs_csp, M_UFSMNT);
676			goto out;
677		}
678		bcopy(bp->b_data, space, (u_int)size);
679		space = (char *)space + size;
680		brelse(bp);
681		bp = NULL;
682	}
683	if (fs->fs_contigsumsize > 0) {
684		fs->fs_maxcluster = lp = space;
685		for (i = 0; i < fs->fs_ncg; i++)
686			*lp++ = fs->fs_contigsumsize;
687	}
688	size = fs->fs_ncg * sizeof(u_int8_t);
689	fs->fs_contigdirs = (u_int8_t *)space;
690	space = (u_int8_t *)space + size;
691	bzero(fs->fs_contigdirs, size);
692	/* Compatibility for old filesystems 	   XXX */
693	if (fs->fs_avgfilesize <= 0)		/* XXX */
694		fs->fs_avgfilesize = AVFILESIZ;	/* XXX */
695	if (fs->fs_avgfpdir <= 0)		/* XXX */
696		fs->fs_avgfpdir = AFPDIR;	/* XXX */
697	mp->mnt_data = (qaddr_t)ump;
698	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
699	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
700	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
701	    vfs_getvfs(&mp->mnt_stat.f_fsid))
702		vfs_getnewfsid(mp);
703	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
704	mp->mnt_flag |= MNT_LOCAL;
705	ump->um_mountp = mp;
706	ump->um_dev = dev;
707	ump->um_devvp = devvp;
708	ump->um_nindir = fs->fs_nindir;
709	ump->um_bptrtodb = fs->fs_fsbtodb;
710	ump->um_seqinc = fs->fs_frag;
711	for (i = 0; i < MAXQUOTAS; i++)
712		ump->um_quotas[i] = NULLVP;
713#ifdef UFS_EXTATTR
714	ufs_extattr_uepm_init(&ump->um_extattr);
715#endif
716	devvp->v_rdev->si_mountpoint = mp;
717	ffs_oldfscompat(fs);
718
719	/*
720	 * Set FS local "last mounted on" information (NULL pad)
721	 */
722	copystr(	mp->mnt_stat.f_mntonname,	/* mount point*/
723			fs->fs_fsmnt,			/* copy area*/
724			sizeof(fs->fs_fsmnt) - 1,	/* max size*/
725			&strsize);			/* real size*/
726	bzero( fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);
727
728	if( mp->mnt_flag & MNT_ROOTFS) {
729		/*
730		 * Root mount; update timestamp in mount structure.
731		 * This will be used by the common root mount code
732		 * to update the system clock.
733		 */
734		mp->mnt_time = fs->fs_time;
735	}
736
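	/*
	 * Clamp the in-core fs_maxfilesize to 2^30 blocks, presumably so
	 * that logical block numbers stay within a 32-bit ufs_daddr_t.
	 * The on-disk value is saved in um_savedmaxfilesize and written
	 * back by ffs_sbupdate().
	 */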
737	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
738	maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1;	/* XXX */
739	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
740		fs->fs_maxfilesize = maxfilesize;		/* XXX */
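	/* debug.bigcgs hack: see the sysctl near the top of this file. */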
741	if (bigcgs) {
742		if (fs->fs_sparecon[0] <= 0)
743			fs->fs_sparecon[0] = fs->fs_cgsize;
744		fs->fs_cgsize = fs->fs_bsize;
745	}
746	if (ronly == 0) {
747		if ((fs->fs_flags & FS_DOSOFTDEP) &&
748		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
749			free(fs->fs_csp, M_UFSMNT);
750			goto out;
751		}
752		if (fs->fs_snapinum[0] != 0)
753			ffs_snapshot_mount(mp);
754		fs->fs_fmod = 1;
755		fs->fs_clean = 0;
756		(void) ffs_sbupdate(ump, MNT_WAIT);
757	}
758#ifdef UFS_EXTATTR
759#ifdef UFS_EXTATTR_AUTOSTART
760	/*
761	 *
762	 * Auto-starting does the following:
763	 *	- check for /.attribute in the fs, and extattr_start if so
764	 *	- for each file in .attribute, enable that file with
765	 * 	  an attribute of the same name.
766	 * Not clear how to report errors -- probably eat them.
767	 * This would all happen while the file system was busy/not
768	 * available, so would effectively be "atomic".
769	 */
770	(void) ufs_extattr_autostart(mp, p);
771#endif /* !UFS_EXTATTR_AUTOSTART */
772#endif /* !UFS_EXTATTR */
773	return (0);
774out:
775	devvp->v_rdev->si_mountpoint = NULL;
776	if (bp)
777		brelse(bp);
778	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
779	if (ump) {
780		free(ump->um_fs, M_UFSMNT);
781		free(ump, M_UFSMNT);
782		mp->mnt_data = (qaddr_t)0;
783	}
784	return (error);
785}
786
787/*
788 * Sanity checks for old file systems.
789 *
790 * XXX - goes away some day.
791 */
792static int
793ffs_oldfscompat(fs)
794	struct fs *fs;
795{
796
797	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
798	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
799	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
800		fs->fs_nrpos = 8;				/* XXX */
801	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
802#if 0
803		int i;						/* XXX */
804		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
805								/* XXX */
806		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
807		for (i = 0; i < NIADDR; i++) {			/* XXX */
808			sizepb *= NINDIR(fs);			/* XXX */
809			fs->fs_maxfilesize += sizepb;		/* XXX */
810		}						/* XXX */
811#endif
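		/*
		 * Rather than computing the exact limit as the disabled
		 * code above would, just cap pre-4.4 format filesystems
		 * at 2^39 bytes.
		 */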
812		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
813		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
814		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
815	}							/* XXX */
816	return (0);
817}
818
819/*
820 * unmount system call
821 */
822int
823ffs_unmount(mp, mntflags, p)
824	struct mount *mp;
825	int mntflags;
826	struct proc *p;
827{
828	register struct ufsmount *ump = VFSTOUFS(mp);
829	register struct fs *fs;
830	int error, flags;
831
832	flags = 0;
833	if (mntflags & MNT_FORCE) {
834		flags |= FORCECLOSE;
835	}
836#ifdef UFS_EXTATTR
837	if ((error = ufs_extattr_stop(mp, p)))
838		if (error != EOPNOTSUPP)
839			printf("ffs_unmount: ufs_extattr_stop returned %d\n",
840			    error);
841	ufs_extattr_uepm_destroy(&ump->um_extattr);
842#endif
843	if (mp->mnt_flag & MNT_SOFTDEP) {
844		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
845			return (error);
846	} else {
847		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
848			return (error);
849	}
850	fs = ump->um_fs;
851	if (bigcgs) {
852		fs->fs_cgsize = fs->fs_sparecon[0];
853		fs->fs_sparecon[0] = 0;
854	}
855	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
856		printf("%s: unmount pending error: blocks %d files %d\n",
857		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
858		fs->fs_pendingblocks = 0;
859		fs->fs_pendinginodes = 0;
860	}
861	if (fs->fs_ronly == 0) {
862		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
863		error = ffs_sbupdate(ump, MNT_WAIT);
864		if (error) {
865			fs->fs_clean = 0;
866			return (error);
867		}
868	}
869	ump->um_devvp->v_rdev->si_mountpoint = NULL;
870
871	vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0);
872	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
873		NOCRED, p);
874
875	vrele(ump->um_devvp);
876
877	free(fs->fs_csp, M_UFSMNT);
878	free(fs, M_UFSMNT);
879	free(ump, M_UFSMNT);
880	mp->mnt_data = (qaddr_t)0;
881	mp->mnt_flag &= ~MNT_LOCAL;
882	return (error);
883}
884
885/*
886 * Flush out all the files in a filesystem.
887 */
888int
889ffs_flushfiles(mp, flags, p)
890	register struct mount *mp;
891	int flags;
892	struct proc *p;
893{
894	register struct ufsmount *ump;
895	int error;
896
897	ump = VFSTOUFS(mp);
898#ifdef QUOTA
899	if (mp->mnt_flag & MNT_QUOTA) {
900		int i;
901		error = vflush(mp, 0, SKIPSYSTEM|flags);
902		if (error)
903			return (error);
904		for (i = 0; i < MAXQUOTAS; i++) {
905			if (ump->um_quotas[i] == NULLVP)
906				continue;
907			quotaoff(p, mp, i);
908		}
909		/*
910		 * Here we fall through to vflush again to ensure
911		 * that we have gotten rid of all the system vnodes.
912		 */
913	}
914#endif
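	/*
	 * A copy-on-write device vnode means snapshots are active on
	 * this filesystem; flush everything but the system vnodes and
	 * tear the snapshots down before the final vflush() below.
	 */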
915	if (ump->um_devvp->v_flag & VCOPYONWRITE) {
916		if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
917			return (error);
918		ffs_snapshot_unmount(mp);
919		/*
920		 * Here we fall through to vflush again to ensure
921		 * that we have gotten rid of all the system vnodes.
922		 */
923	}
924	/*
925	 * Flush all the files.
926	 */
927	if ((error = vflush(mp, 0, flags)) != 0)
928		return (error);
929	/*
930	 * Flush filesystem metadata.
931	 */
932	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
933	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p);
934	VOP_UNLOCK(ump->um_devvp, 0, p);
935	return (error);
936}
937
938/*
939 * Get file system statistics.
940 */
941int
942ffs_statfs(mp, sbp, p)
943	struct mount *mp;
944	register struct statfs *sbp;
945	struct proc *p;
946{
947	register struct ufsmount *ump;
948	register struct fs *fs;
949
950	ump = VFSTOUFS(mp);
951	fs = ump->um_fs;
952	if (fs->fs_magic != FS_MAGIC)
953		panic("ffs_statfs");
954	sbp->f_bsize = fs->fs_fsize;
955	sbp->f_iosize = fs->fs_bsize;
956	sbp->f_blocks = fs->fs_dsize;
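	/*
	 * Blocks and inodes whose release is still pending (tracked in
	 * fs_pendingblocks/fs_pendinginodes) are counted as free here,
	 * since they will become available once the outstanding soft
	 * updates work completes.
	 */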
957	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
958	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
959	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
960	    dbtofsb(fs, fs->fs_pendingblocks);
961	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
962	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
963	if (sbp != &mp->mnt_stat) {
964		sbp->f_type = mp->mnt_vfc->vfc_typenum;
965		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
966			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
967		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
968			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
969	}
970	return (0);
971}
972
973/*
974 * Go through the disk queues to initiate sandbagged IO;
975 * go through the inodes to write those that have been modified;
976 * initiate the writing of the super block if it has been modified.
977 *
978 * Note: we are always called with the filesystem marked `MPBUSY'.
979 */
980int
981ffs_sync(mp, waitfor, cred, p)
982	struct mount *mp;
983	int waitfor;
984	struct ucred *cred;
985	struct proc *p;
986{
987	struct vnode *nvp, *vp, *devvp;
988	struct inode *ip;
989	struct ufsmount *ump = VFSTOUFS(mp);
990	struct fs *fs;
991	int error, count, wait, lockreq, allerror = 0;
992
993	fs = ump->um_fs;
994	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
995		printf("fs = %s\n", fs->fs_fsmnt);
996		panic("ffs_sync: rofs mod");
997	}
998	/*
999	 * Write back each (modified) inode.
1000	 */
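	/*
	 * For a MNT_WAIT sync we are willing to sleep for each vnode
	 * lock; otherwise LK_NOWAIT lets busy vnodes be skipped.
	 */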
1001	wait = 0;
1002	lockreq = LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK;
1003	if (waitfor == MNT_WAIT) {
1004		wait = 1;
1005		lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
1006	}
1007	mtx_lock(&mntvnode_mtx);
1008loop:
1009	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
1010		/*
1011		 * If the vnode that we are about to sync is no longer
1012		 * associated with this mount point, start over.
1013		 */
1014		if (vp->v_mount != mp)
1015			goto loop;
1016		nvp = LIST_NEXT(vp, v_mntvnodes);
1017
1018		mtx_unlock(&mntvnode_mtx);
1019		mtx_lock(&vp->v_interlock);
1020		ip = VTOI(vp);
1021		if (vp->v_type == VNON || ((ip->i_flag &
1022		     (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1023		     TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
1024			mtx_unlock(&vp->v_interlock);
1025			mtx_lock(&mntvnode_mtx);
1026			continue;
1027		}
1028		if (vp->v_type != VCHR) {
1029			if ((error = vget(vp, lockreq, p)) != 0) {
1030				mtx_lock(&mntvnode_mtx);
1031				if (error == ENOENT)
1032					goto loop;
1033				continue;
1034			}
1035			if ((error = VOP_FSYNC(vp, cred, waitfor, p)) != 0)
1036				allerror = error;
1037			VOP_UNLOCK(vp, 0, p);
1038			vrele(vp);
1039		} else {
1040			mtx_unlock(&vp->v_interlock);
1041			UFS_UPDATE(vp, wait);
1042		}
1043		mtx_lock(&mntvnode_mtx);
1044	}
1045	mtx_unlock(&mntvnode_mtx);
1046	/*
1047	 * Force stale file system control information to be flushed.
1048	 */
1049	if (waitfor == MNT_WAIT) {
1050		if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
1051			allerror = error;
1052		/* Flushed work items may create new vnodes to clean */
1053		if (count) {
1054			mtx_lock(&mntvnode_mtx);
1055			goto loop;
1056		}
1057	}
1058#ifdef QUOTA
1059	qsync(mp);
1060#endif
1061	devvp = ump->um_devvp;
1062	mtx_lock(&devvp->v_interlock);
1063	if (waitfor != MNT_LAZY &&
1064	    (devvp->v_numoutput > 0 || TAILQ_FIRST(&devvp->v_dirtyblkhd))) {
1065		mtx_unlock(&devvp->v_interlock);
1066		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
1067		if ((error = VOP_FSYNC(devvp, cred, waitfor, p)) != 0)
1068			allerror = error;
1069		VOP_UNLOCK(devvp, 0, p);
1070		if (waitfor == MNT_WAIT) {
1071			mtx_lock(&mntvnode_mtx);
1072			goto loop;
1073		}
1074	} else
1075		mtx_unlock(&devvp->v_interlock);
1076	/*
1077	 * Write back modified superblock.
1078	 */
1079	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
1080		allerror = error;
1081	return (allerror);
1082}
1083
1084/*
1085 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1086 * in from disk.  If it is in core, wait for the lock bit to clear, then
1087 * return the inode locked.  Detection and handling of mount points must be
1088 * done by the calling routine.
1089 */
1090static int ffs_inode_hash_lock;
1091/*
1092 * ffs_inode_hash_lock is a variable to manage mutual exclusion
1093 * of vnode allocation and insertion into the hash, especially to
1094 * avoid holding more than one vnode for the same inode in the
1095 * hash table. ffs_inode_hash_lock must hence be tested-and-set
1096 * or cleared atomically, accomplished by ffs_inode_hash_mtx.
1097 *
1098 * As vnode allocation may block during MALLOC() and zone
1099 * allocation, we should also msleep() to give up the CPU
1100 * if anyone else is allocating a vnode. lockmgr is not suitable
1101 * here because someone else may insert into the hash table the
1102 * vnode we are trying to allocate while we sleep, in which
1103 * case the hash table needs to be examined once again after
1104 * waking up.
1105 */
1106static struct mtx ffs_inode_hash_mtx;
1107
1108int
1109ffs_vget(mp, ino, vpp)
1110	struct mount *mp;
1111	ino_t ino;
1112	struct vnode **vpp;
1113{
1114	struct fs *fs;
1115	struct inode *ip;
1116	struct ufsmount *ump;
1117	struct buf *bp;
1118	struct vnode *vp;
1119	dev_t dev;
1120	int error, want_wakeup;
1121
1122	ump = VFSTOUFS(mp);
1123	dev = ump->um_dev;
1124restart:
1125	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1126		return (0);
1127	}
1128
1129	/*
1130	 * Lock out the creation of new entries in the FFS hash table in
1131	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
1132	 * may occur!
1133	 */
1134	mtx_lock(&ffs_inode_hash_mtx);
1135	if (ffs_inode_hash_lock) {
1136		while (ffs_inode_hash_lock) {
1137			ffs_inode_hash_lock = -1;
1138			msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
1139		}
1140		mtx_unlock(&ffs_inode_hash_mtx);
1141		goto restart;
1142	}
1143	ffs_inode_hash_lock = 1;
1144	mtx_unlock(&ffs_inode_hash_mtx);
1145
1146	/*
1147	 * If this MALLOC() is performed after the getnewvnode()
1148	 * it might block, leaving a vnode with a NULL v_data to be
1149	 * found by ffs_sync() if a sync happens to fire right then,
1150	 * which will cause a panic because ffs_sync() blindly
1151	 * dereferences vp->v_data (as well it should).
1152	 */
1153	MALLOC(ip, struct inode *, sizeof(struct inode),
1154	    ump->um_malloctype, M_WAITOK);
1155
1156	/* Allocate a new vnode/inode. */
1157	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
1158	if (error) {
1159		/*
1160		 * Do not wake up processes while holding the mutex,
1161		 * otherwise the processes woken up will immediately block
1162		 * on the mutex again.
1163		 */
1164		mtx_lock(&ffs_inode_hash_mtx);
1165		want_wakeup = ffs_inode_hash_lock < 0;
1166		ffs_inode_hash_lock = 0;
1167		mtx_unlock(&ffs_inode_hash_mtx);
1168		if (want_wakeup)
1169			wakeup(&ffs_inode_hash_lock);
1170		*vpp = NULL;
1171		FREE(ip, ump->um_malloctype);
1172		return (error);
1173	}
1174	bzero((caddr_t)ip, sizeof(struct inode));
1175	/*
1176	 * FFS supports lock sharing in the stack of vnodes
1177	 */
1178	vp->v_vnlock = &vp->v_lock;
1179	lockinit(vp->v_vnlock, PINOD, "inode", 0, LK_CANRECURSE);
1180	vp->v_data = ip;
1181	ip->i_vnode = vp;
1182	ip->i_fs = fs = ump->um_fs;
1183	ip->i_dev = dev;
1184	ip->i_number = ino;
1185#ifdef QUOTA
1186	{
1187		int i;
1188		for (i = 0; i < MAXQUOTAS; i++)
1189			ip->i_dquot[i] = NODQUOT;
1190	}
1191#endif
1192	/*
1193	 * Put it onto its hash chain and lock it so that other requests for
1194	 * this inode will block if they arrive while we are sleeping waiting
1195	 * for old data structures to be purged or for the contents of the
1196	 * disk portion of this inode to be read.
1197	 */
1198	ufs_ihashins(ip);
1199
1200	/*
1201	 * Do not wake up processes while holding the mutex,
1202	 * otherwise the processes woken up will immediately block
1203	 * on the mutex again.
1204	 */
1205	mtx_lock(&ffs_inode_hash_mtx);
1206	want_wakeup = ffs_inode_hash_lock < 0;
1207	ffs_inode_hash_lock = 0;
1208	mtx_unlock(&ffs_inode_hash_mtx);
1209	if (want_wakeup)
1210		wakeup(&ffs_inode_hash_lock);
1211
1212	/* Read in the disk contents for the inode, copy into the inode. */
1213	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1214	    (int)fs->fs_bsize, NOCRED, &bp);
1215	if (error) {
1216		/*
1217		 * The inode does not contain anything useful, so it would
1218		 * be misleading to leave it on its hash chain. With mode
1219		 * still zero, it will be unlinked and returned to the free
1220		 * list by vput().
1221		 */
1222		brelse(bp);
1223		vput(vp);
1224		*vpp = NULL;
1225		return (error);
1226	}
1227	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
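	/*
	 * With soft updates the effective link count can differ from
	 * the on-disk count, so let the softdep code set i_effnlink
	 * from its dependency tracking; otherwise it is simply a copy
	 * of i_nlink.
	 */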
1228	if (DOINGSOFTDEP(vp))
1229		softdep_load_inodeblock(ip);
1230	else
1231		ip->i_effnlink = ip->i_nlink;
1232	bqrelse(bp);
1233
1234	/*
1235	 * Initialize the vnode from the inode, check for aliases.
1236	 * Note that the underlying vnode may have changed.
1237	 */
1238	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1239	if (error) {
1240		vput(vp);
1241		*vpp = NULL;
1242		return (error);
1243	}
1244	/*
1245	 * Finish inode initialization now that aliasing has been resolved.
1246	 */
1247	ip->i_devvp = ump->um_devvp;
1248	VREF(ip->i_devvp);
1249	/*
1250	 * Set up a generation number for this inode if it does not
1251	 * already have one. This should only happen on old filesystems.
1252	 */
1253	if (ip->i_gen == 0) {
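		/* random()/2 + 1 presumably guarantees a positive, non-zero value. */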
1254		ip->i_gen = random() / 2 + 1;
1255		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
1256			ip->i_flag |= IN_MODIFIED;
1257	}
1258	/*
1259	 * Ensure that uid and gid are correct. This is a temporary
1260	 * fix until fsck has been changed to do the update.
1261	 */
1262	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
1263		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
1264		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
1265	}						/* XXX */
1266
1267	*vpp = vp;
1268	return (0);
1269}
1270
1271/*
1272 * File handle to vnode
1273 *
1274 * Have to be really careful about stale file handles:
1275 * - check that the inode number is valid
1276 * - call ffs_vget() to get the locked inode
1277 * - check for an unallocated inode (i_mode == 0)
1278 * - check that the given client host has export rights and return
1279 *   those rights via. exflagsp and credanonp
1280 *   those rights via exflagsp and credanonp
1281int
1282ffs_fhtovp(mp, fhp, vpp)
1283	register struct mount *mp;
1284	struct fid *fhp;
1285	struct vnode **vpp;
1286{
1287	register struct ufid *ufhp;
1288	struct fs *fs;
1289
1290	ufhp = (struct ufid *)fhp;
1291	fs = VFSTOUFS(mp)->um_fs;
1292	if (ufhp->ufid_ino < ROOTINO ||
1293	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1294		return (ESTALE);
1295	return (ufs_fhtovp(mp, ufhp, vpp));
1296}
1297
1298/*
1299 * Vnode pointer to File handle
1300 */
1301/* ARGSUSED */
1302int
1303ffs_vptofh(vp, fhp)
1304	struct vnode *vp;
1305	struct fid *fhp;
1306{
1307	register struct inode *ip;
1308	register struct ufid *ufhp;
1309
1310	ip = VTOI(vp);
1311	ufhp = (struct ufid *)fhp;
1312	ufhp->ufid_len = sizeof(struct ufid);
1313	ufhp->ufid_ino = ip->i_number;
1314	ufhp->ufid_gen = ip->i_gen;
1315	return (0);
1316}
1317
1318/*
1319 * Initialize the filesystem; mostly just use ufs_init.
1320 */
1321static int
1322ffs_init(vfsp)
1323	struct vfsconf *vfsp;
1324{
1325
1326	softdep_initialize();
1327	mtx_init(&ffs_inode_hash_mtx, "ifsvgt", MTX_DEF);
1328	return (ufs_init(vfsp));
1329}
1330
1331/*
1332 * Write a superblock and associated information back to disk.
1333 */
1334static int
1335ffs_sbupdate(mp, waitfor)
1336	struct ufsmount *mp;
1337	int waitfor;
1338{
1339	register struct fs *dfs, *fs = mp->um_fs;
1340	register struct buf *bp;
1341	int blks;
1342	void *space;
1343	int i, size, error, allerror = 0;
1344
1345	/*
1346	 * First write back the summary information.
1347	 */
1348	blks = howmany(fs->fs_cssize, fs->fs_fsize);
1349	space = fs->fs_csp;
1350	for (i = 0; i < blks; i += fs->fs_frag) {
1351		size = fs->fs_bsize;
1352		if (i + fs->fs_frag > blks)
1353			size = (blks - i) * fs->fs_fsize;
1354		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1355		    size, 0, 0);
1356		bcopy(space, bp->b_data, (u_int)size);
1357		space = (char *)space + size;
1358		if (waitfor != MNT_WAIT)
1359			bawrite(bp);
1360		else if ((error = bwrite(bp)) != 0)
1361			allerror = error;
1362	}
1363	/*
1364	 * Now write back the superblock itself. If any errors occurred
1365	 * up to this point, then fail so that the superblock avoids
1366	 * being written out as clean.
1367	 */
1368	if (allerror)
1369		return (allerror);
1370	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
1371	fs->fs_fmod = 0;
1372	fs->fs_time = time_second;
1373	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1374	/* Restore compatibility to old file systems.		   XXX */
1375	dfs = (struct fs *)bp->b_data;				/* XXX */
1376	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
1377		dfs->fs_nrpos = -1;				/* XXX */
1378	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
1379		int32_t *lp, tmp;				/* XXX */
1380								/* XXX */
1381		lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
1382		tmp = lp[4];					/* XXX */
1383		for (i = 4; i > 0; i--)				/* XXX */
1384			lp[i] = lp[i-1];			/* XXX */
1385		lp[0] = tmp;					/* XXX */
1386	}							/* XXX */
1387	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */
1388	if (waitfor != MNT_WAIT)
1389		bawrite(bp);
1390	else if ((error = bwrite(bp)) != 0)
1391		allerror = error;
1392	return (allerror);
1393}
1394