ffs_vfsops.c revision 75503
1/*
2 * Copyright (c) 1989, 1991, 1993, 1994
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by the University of
16 *	California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
34 * $FreeBSD: head/sys/ufs/ffs/ffs_vfsops.c 75503 2001-04-14 05:26:28Z mckusick $
35 */
36
37#include "opt_quota.h"
38#include "opt_ufs.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/namei.h>
43#include <sys/proc.h>
44#include <sys/kernel.h>
45#include <sys/vnode.h>
46#include <sys/mount.h>
47#include <sys/bio.h>
48#include <sys/buf.h>
49#include <sys/conf.h>
50#include <sys/fcntl.h>
51#include <sys/disklabel.h>
52#include <sys/malloc.h>
53#include <sys/mutex.h>
54
55#include <ufs/ufs/extattr.h>
56#include <ufs/ufs/quota.h>
57#include <ufs/ufs/ufsmount.h>
58#include <ufs/ufs/inode.h>
59#include <ufs/ufs/ufs_extern.h>
60
61#include <ufs/ffs/fs.h>
62#include <ufs/ffs/ffs_extern.h>
63
64#include <vm/vm.h>
65#include <vm/vm_page.h>
66
67static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part");
68
69static int	ffs_sbupdate __P((struct ufsmount *, int));
70int	ffs_reload __P((struct mount *,struct ucred *,struct proc *));
71static int	ffs_oldfscompat __P((struct fs *));
72static int	ffs_init __P((struct vfsconf *));
73
74static struct vfsops ufs_vfsops = {
75	ffs_mount,
76	ufs_start,
77	ffs_unmount,
78	ufs_root,
79	ufs_quotactl,
80	ffs_statfs,
81	ffs_sync,
82	ffs_vget,
83	ffs_fhtovp,
84	ufs_check_export,
85	ffs_vptofh,
86	ffs_init,
87	vfs_stduninit,
88#ifdef UFS_EXTATTR
89	ufs_extattrctl,
90#else
91	vfs_stdextattrctl,
92#endif
93};
94
95VFS_SET(ufs_vfsops, ufs, 0);
96
97/*
98 * ffs_mount
99 *
100 * Called when mounting local physical media
101 *
102 * PARAMETERS:
103 *		mountroot
104 *			mp	mount point structure
105 *			path	NULL (flag for root mount!!!)
106 *			data	<unused>
107 *			ndp	<unused>
108 *			p	process (user credentials check [statfs])
109 *
110 *		mount
111 *			mp	mount point structure
112 *			path	path to mount point
113 *			data	pointer to argument struct in user space
114 *			ndp	mount point namei() return (used for
115 *				credentials on reload), reused to look
116 *				up block device.
117 *			p	process (user credentials check)
118 *
119 * RETURNS:	0	Success
120 *		!0	error number (errno.h)
121 *
122 * LOCK STATE:
123 *
124 *		ENTRY
125 *			mount point is locked
126 *		EXIT
127 *			mount point is locked
128 *
129 * NOTES:
130 *		A NULL path can be used for a flag since the mount
131 *		system call will fail with EFAULT in copyinstr in
132 *		namei() if it is a genuine NULL from the user.
133 */
134int
135ffs_mount(mp, path, data, ndp, p)
136        struct mount		*mp;	/* mount struct pointer*/
137        char			*path;	/* path to mount point*/
138        caddr_t			data;	/* arguments to FS specific mount*/
139        struct nameidata	*ndp;	/* mount point credentials*/
140        struct proc		*p;	/* process requesting mount*/
141{
142	size_t		size;
143	struct vnode	*devvp;
144	struct ufs_args args;
145	struct ufsmount *ump = 0;
146	register struct fs *fs;
147	int error, flags;
148	mode_t accessmode;
149
150	/*
151	 * Use NULL path to indicate we are mounting the root file system.
152	 */
153	if (path == NULL) {
154		if ((error = bdevvp(rootdev, &rootvp))) {
155			printf("ffs_mountroot: can't find rootvp\n");
156			return (error);
157		}
158
159		if ((error = ffs_mountfs(rootvp, mp, p, M_FFSNODE)) != 0)
160			return (error);
161
162		(void)VFS_STATFS(mp, &mp->mnt_stat, p);
163		return (0);
164	}
165
166	/*
167	 * Mounting non-root file system or updating a file system
168	 */
169	if ((error = copyin(data, (caddr_t)&args, sizeof(struct ufs_args)))!= 0)
170		return (error);
171
172	/*
173	 * If updating, check whether changing from read-only to
174	 * read/write; if there is no device name, that's all we do.
175	 */
176	if (mp->mnt_flag & MNT_UPDATE) {
177		ump = VFSTOUFS(mp);
178		fs = ump->um_fs;
179		devvp = ump->um_devvp;
180		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
181			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
182				return (error);
183			flags = WRITECLOSE;
184			if (mp->mnt_flag & MNT_FORCE)
185				flags |= FORCECLOSE;
186			if (mp->mnt_flag & MNT_SOFTDEP) {
187				error = softdep_flushfiles(mp, flags, p);
188			} else {
189				error = ffs_flushfiles(mp, flags, p);
190			}
191			if (error) {
192				vn_finished_write(mp);
193				return (error);
194			}
195			fs->fs_ronly = 1;
196			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
197				fs->fs_clean = 1;
198			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
199				fs->fs_ronly = 0;
200				fs->fs_clean = 0;
201				vn_finished_write(mp);
202				return (error);
203			}
204			vn_finished_write(mp);
205		}
206		if ((mp->mnt_flag & MNT_RELOAD) &&
207		    (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)) != 0)
208			return (error);
209		if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
210			/*
211			 * If upgrade to read-write by non-root, then verify
212			 * that user has necessary permissions on the device.
213			 */
214			if (p->p_ucred->cr_uid != 0) {
215				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
216				if ((error = VOP_ACCESS(devvp, VREAD | VWRITE,
217				    p->p_ucred, p)) != 0) {
218					VOP_UNLOCK(devvp, 0, p);
219					return (error);
220				}
221				VOP_UNLOCK(devvp, 0, p);
222			}
223			fs->fs_flags &= ~FS_UNCLEAN;
224			if (fs->fs_clean == 0) {
225				fs->fs_flags |= FS_UNCLEAN;
226				if ((mp->mnt_flag & MNT_FORCE) ||
227				    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
228				     (fs->fs_flags & FS_DOSOFTDEP))) {
229					printf("WARNING: %s was not %s\n",
230					   fs->fs_fsmnt, "properly dismounted");
231				} else {
232					printf(
233"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
234					    fs->fs_fsmnt);
235					return (EPERM);
236				}
237			}
238			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
239				return (error);
240			fs->fs_ronly = 0;
241			fs->fs_clean = 0;
242			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
243				vn_finished_write(mp);
244				return (error);
245			}
246			/* check to see if we need to start softdep */
247			if ((fs->fs_flags & FS_DOSOFTDEP) &&
248			    (error = softdep_mount(devvp, mp, fs, p->p_ucred))){
249				vn_finished_write(mp);
250				return (error);
251			}
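			/*
			 * If the superblock lists snapshot inodes
			 * (fs_snapinum), reactivate them now that the
			 * filesystem is going read-write.
			 */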
252			if (fs->fs_snapinum[0] != 0)
253				ffs_snapshot_mount(mp);
254			vn_finished_write(mp);
255		}
256		/*
257		 * Soft updates is incompatible with "async",
258		 * so if we are doing soft updates, stop the user
259		 * from setting the async flag in an update.
260		 * Softdep_mount() clears it in an initial mount
261		 * or ro->rw remount.
262		 */
263		if (mp->mnt_flag & MNT_SOFTDEP)
264			mp->mnt_flag &= ~MNT_ASYNC;
265		/*
266		 * If not updating name, process export requests.
267		 */
268		if (args.fspec == 0)
269			return (vfs_export(mp, &ump->um_export, &args.export));
270		/*
271		 * If this is a snapshot request, take the snapshot.
272		 */
273		if (mp->mnt_flag & MNT_SNAPSHOT)
274			return (ffs_snapshot(mp, args.fspec));
275	}
276
277	/*
278	 * Not an update, or updating the name: look up the name
279	 * and verify that it refers to a sensible block device.
280	 */
281	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
282	if ((error = namei(ndp)) != 0)
283		return (error);
284	NDFREE(ndp, NDF_ONLY_PNBUF);
285	devvp = ndp->ni_vp;
286	if (!vn_isdisk(devvp, &error)) {
287		vrele(devvp);
288		return (error);
289	}
290
291	/*
292	 * If mount by non-root, then verify that user has necessary
293	 * permissions on the device.
294	 */
295	if (p->p_ucred->cr_uid != 0) {
296		accessmode = VREAD;
297		if ((mp->mnt_flag & MNT_RDONLY) == 0)
298			accessmode |= VWRITE;
299		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
300		if ((error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p))!= 0){
301			vput(devvp);
302			return (error);
303		}
304		VOP_UNLOCK(devvp, 0, p);
305	}
306
307	if (mp->mnt_flag & MNT_UPDATE) {
308		/*
309		 * Update only
310		 *
311		 * If it is not the same vnode, or at least on the same
312		 * device, then the update request is invalid.
313		 */
314
315		if (devvp != ump->um_devvp &&
316		    devvp->v_rdev != ump->um_devvp->v_rdev)
317			error = EINVAL;	/* needs translation */
318		vrele(devvp);
319		if (error)
320			return (error);
321	} else {
322		/*
323		 * New mount
324		 *
325		 * We need the name for the mount point (also used for
326		 * "last mounted on") copied in. If an error occurs,
327		 * the mount point is discarded by the upper level code.
328		 * Note that vfs_mount() populates f_mntonname for us.
329		 */
330		if ((error = ffs_mountfs(devvp, mp, p, M_FFSNODE)) != 0) {
331			vrele(devvp);
332			return (error);
333		}
334	}
335	/*
336	 * Save "mounted from" device name info for mount point (NULL pad).
337	 */
338	copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
339	bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
340	/*
341	 * Initialize filesystem stat information in mount struct.
342	 */
343	(void)VFS_STATFS(mp, &mp->mnt_stat, p);
344	return (0);
345}
346
347/*
348 * Reload all incore data for a filesystem (used after running fsck on
349 * the root filesystem and finding things to fix). The filesystem must
350 * be mounted read-only.
351 *
352 * Things to do to update the mount:
353 *	1) invalidate all cached meta-data.
354 *	2) re-read superblock from disk.
355 *	3) re-read summary information from disk.
356 *	4) invalidate all inactive vnodes.
357 *	5) invalidate all cached file data.
358 *	6) re-read inode data for all active vnodes.
359 */
360int
361ffs_reload(mp, cred, p)
362	register struct mount *mp;
363	struct ucred *cred;
364	struct proc *p;
365{
366	register struct vnode *vp, *nvp, *devvp;
367	struct inode *ip;
368	void *space;
369	struct buf *bp;
370	struct fs *fs, *newfs;
371	struct partinfo dpart;
372	dev_t dev;
373	int i, blks, size, error;
374	int32_t *lp;
375
376	if ((mp->mnt_flag & MNT_RDONLY) == 0)
377		return (EINVAL);
378	/*
379	 * Step 1: invalidate all cached meta-data.
380	 */
381	devvp = VFSTOUFS(mp)->um_devvp;
382	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
383	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
384	VOP_UNLOCK(devvp, 0, p);
385	if (error)
386		panic("ffs_reload: dirty1");
387
388	dev = devvp->v_rdev;
389
390	/*
391	 * Only VMIO the backing device if the backing device is a real
392	 * block device.  See ffs_mountfs() for more details.
393	 */
394	if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
395		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
396		vfs_object_create(devvp, p, p->p_ucred);
397		mtx_lock(&devvp->v_interlock);
398		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
399	}
400
401	/*
402	 * Step 2: re-read superblock from disk.
403	 */
404	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
405		size = DEV_BSIZE;
406	else
407		size = dpart.disklab->d_secsize;
408	if ((error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) != 0)
409		return (error);
410	newfs = (struct fs *)bp->b_data;
411	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
412		newfs->fs_bsize < sizeof(struct fs)) {
413			brelse(bp);
414			return (EIO);		/* XXX needs translation */
415	}
416	fs = VFSTOUFS(mp)->um_fs;
417	/*
418	 * Copy pointer fields back into superblock before copying in	XXX
419	 * new superblock. These should really be in the ufsmount.	XXX
420	 * Note that important parameters (e.g. fs_ncg) are unchanged.
421	 */
422	newfs->fs_csp = fs->fs_csp;
423	newfs->fs_maxcluster = fs->fs_maxcluster;
424	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
425	if (fs->fs_sbsize < SBSIZE)
426		bp->b_flags |= B_INVAL | B_NOCACHE;
427	brelse(bp);
428	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
429	ffs_oldfscompat(fs);
430
431	/*
432	 * Step 3: re-read summary information from disk.
433	 */
434	blks = howmany(fs->fs_cssize, fs->fs_fsize);
435	space = fs->fs_csp;
436	for (i = 0; i < blks; i += fs->fs_frag) {
437		size = fs->fs_bsize;
438		if (i + fs->fs_frag > blks)
439			size = (blks - i) * fs->fs_fsize;
440		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
441		    NOCRED, &bp);
442		if (error)
443			return (error);
444		bcopy(bp->b_data, space, (u_int)size);
445		space = (char *)space + size;
446		brelse(bp);
447	}
448	/*
449	 * We no longer know anything about clusters per cylinder group.
450	 */
451	if (fs->fs_contigsumsize > 0) {
452		lp = fs->fs_maxcluster;
453		for (i = 0; i < fs->fs_ncg; i++)
454			*lp++ = fs->fs_contigsumsize;
455	}
456
457loop:
458	mtx_lock(&mntvnode_mtx);
459	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
460		if (vp->v_mount != mp) {
461			mtx_unlock(&mntvnode_mtx);
462			goto loop;
463		}
464		nvp = LIST_NEXT(vp, v_mntvnodes);
465		/*
466		 * Step 4: invalidate all inactive vnodes.
467		 */
468		if (vrecycle(vp, &mntvnode_mtx, p))
469			goto loop;
470		/*
471		 * Step 5: invalidate all cached file data.
472		 */
473		mtx_lock(&vp->v_interlock);
474		mtx_unlock(&mntvnode_mtx);
475		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
476			goto loop;
477		}
478		if (vinvalbuf(vp, 0, cred, p, 0, 0))
479			panic("ffs_reload: dirty2");
480		/*
481		 * Step 6: re-read inode data for all active vnodes.
482		 */
483		ip = VTOI(vp);
484		error =
485		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
486		    (int)fs->fs_bsize, NOCRED, &bp);
487		if (error) {
488			vput(vp);
489			return (error);
490		}
491		ip->i_din = *((struct dinode *)bp->b_data +
492		    ino_to_fsbo(fs, ip->i_number));
493		ip->i_effnlink = ip->i_nlink;
494		brelse(bp);
495		vput(vp);
496		mtx_lock(&mntvnode_mtx);
497	}
498	mtx_unlock(&mntvnode_mtx);
499	return (0);
500}
501
502/*
503 * Common code for mount and mountroot
504 */
505int
506ffs_mountfs(devvp, mp, p, malloctype)
507	register struct vnode *devvp;
508	struct mount *mp;
509	struct proc *p;
510	struct malloc_type *malloctype;
511{
512	register struct ufsmount *ump;
513	struct buf *bp;
514	register struct fs *fs;
515	dev_t dev;
516	struct partinfo dpart;
517	void *space;
518	int error, i, blks, size, ronly;
519	int32_t *lp;
520	struct ucred *cred;
521	u_int64_t maxfilesize;					/* XXX */
522	size_t strsize;
523	int ncount;
524
525	dev = devvp->v_rdev;
526	cred = p ? p->p_ucred : NOCRED;
527	/*
528	 * Disallow multiple mounts of the same device.
529	 * Disallow mounting of a device that is currently in use
530	 * (except for root, which might share swap device for miniroot).
531	 * Flush out any old buffers remaining from a previous use.
532	 */
533	error = vfs_mountedon(devvp);
534	if (error)
535		return (error);
536	ncount = vcount(devvp);
537
538	if (ncount > 1 && devvp != rootvp)
539		return (EBUSY);
540	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
541	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
542	VOP_UNLOCK(devvp, 0, p);
543	if (error)
544		return (error);
545
546	/*
547	 * Only VMIO the backing device if the backing device is a real
548	 * block device.  This excludes the original MFS implementation.
549	 * Note that it is optional that the backing device be VMIOed.  This
550	 * increases the opportunity for metadata caching.
551	 */
552	if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) {
553		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
554		vfs_object_create(devvp, p, cred);
555		mtx_lock(&devvp->v_interlock);
556		VOP_UNLOCK(devvp, LK_INTERLOCK, p);
557	}
558
559	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
560	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
561	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
562	VOP_UNLOCK(devvp, 0, p);
563	if (error)
564		return (error);
565	if (devvp->v_rdev->si_iosize_max > mp->mnt_iosize_max)
566		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
567	if (mp->mnt_iosize_max > MAXPHYS)
568		mp->mnt_iosize_max = MAXPHYS;
569
570	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
571		size = DEV_BSIZE;
572	else
573		size = dpart.disklab->d_secsize;
574
575	bp = NULL;
576	ump = NULL;
577	if ((error = bread(devvp, SBLOCK, SBSIZE, cred, &bp)) != 0)
578		goto out;
579	fs = (struct fs *)bp->b_data;
580	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
581	    fs->fs_bsize < sizeof(struct fs)) {
582		error = EINVAL;		/* XXX needs translation */
583		goto out;
584	}
585	fs->fs_fmod = 0;
586	fs->fs_flags &= ~FS_UNCLEAN;
587	if (fs->fs_clean == 0) {
588		fs->fs_flags |= FS_UNCLEAN;
589		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
590		    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
591		     (fs->fs_flags & FS_DOSOFTDEP))) {
592			printf(
593"WARNING: %s was not properly dismounted\n",
594			    fs->fs_fsmnt);
595		} else {
596			printf(
597"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
598			    fs->fs_fsmnt);
599			error = EPERM;
600			goto out;
601		}
602	}
603	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
604	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
605		error = EROFS;          /* needs translation */
606		goto out;
607	}
608	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
609	ump->um_malloctype = malloctype;
610	ump->um_i_effnlink_valid = 1;
611	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
612	    M_WAITOK);
613	ump->um_blkatoff = ffs_blkatoff;
614	ump->um_truncate = ffs_truncate;
615	ump->um_update = ffs_update;
616	ump->um_valloc = ffs_valloc;
617	ump->um_vfree = ffs_vfree;
618	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
619	if (fs->fs_sbsize < SBSIZE)
620		bp->b_flags |= B_INVAL | B_NOCACHE;
621	brelse(bp);
622	bp = NULL;
623	fs = ump->um_fs;
624	fs->fs_ronly = ronly;
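	/*
	 * The cylinder group summary area, the cluster summary array
	 * (fs_maxcluster) and the per-cg directory counters
	 * (fs_contigdirs) are all carved out of the single allocation
	 * made below; fs_csp points at its base and is what gets freed
	 * again on unmount or error.
	 */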
625	size = fs->fs_cssize;
626	blks = howmany(size, fs->fs_fsize);
627	if (fs->fs_contigsumsize > 0)
628		size += fs->fs_ncg * sizeof(int32_t);
629	size += fs->fs_ncg * sizeof(u_int8_t);
630	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
631	fs->fs_csp = space;
632	for (i = 0; i < blks; i += fs->fs_frag) {
633		size = fs->fs_bsize;
634		if (i + fs->fs_frag > blks)
635			size = (blks - i) * fs->fs_fsize;
636		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
637		    cred, &bp)) != 0) {
638			free(fs->fs_csp, M_UFSMNT);
639			goto out;
640		}
641		bcopy(bp->b_data, space, (u_int)size);
642		space = (char *)space + size;
643		brelse(bp);
644		bp = NULL;
645	}
646	if (fs->fs_contigsumsize > 0) {
647		fs->fs_maxcluster = lp = space;
648		for (i = 0; i < fs->fs_ncg; i++)
649			*lp++ = fs->fs_contigsumsize;
650	}
651	size = fs->fs_ncg * sizeof(u_int8_t);
652	fs->fs_contigdirs = (u_int8_t *)space;
653	space = (u_int8_t *)space + size;
654	bzero(fs->fs_contigdirs, size);
655	/* Compatibility for old filesystems 	   XXX */
656	if (fs->fs_avgfilesize <= 0)		/* XXX */
657		fs->fs_avgfilesize = AVFILESIZ;	/* XXX */
658	if (fs->fs_avgfpdir <= 0)		/* XXX */
659		fs->fs_avgfpdir = AFPDIR;	/* XXX */
660	mp->mnt_data = (qaddr_t)ump;
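	/*
	 * Use the filesystem id recorded in the superblock unless it is
	 * unset or already claimed by another mount, in which case a
	 * fresh one is generated.
	 */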
661	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
662	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
663	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
664	    vfs_getvfs(&mp->mnt_stat.f_fsid))
665		vfs_getnewfsid(mp);
666	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
667	mp->mnt_flag |= MNT_LOCAL;
668	ump->um_mountp = mp;
669	ump->um_dev = dev;
670	ump->um_devvp = devvp;
671	ump->um_nindir = fs->fs_nindir;
672	ump->um_bptrtodb = fs->fs_fsbtodb;
673	ump->um_seqinc = fs->fs_frag;
674	for (i = 0; i < MAXQUOTAS; i++)
675		ump->um_quotas[i] = NULLVP;
676#ifdef UFS_EXTATTR
677	ufs_extattr_uepm_init(&ump->um_extattr);
678#endif
679	devvp->v_rdev->si_mountpoint = mp;
680	ffs_oldfscompat(fs);
681
682	/*
683	 * Set FS local "last mounted on" information (NULL pad)
684	 */
685	copystr(	mp->mnt_stat.f_mntonname,	/* mount point*/
686			fs->fs_fsmnt,			/* copy area*/
687			sizeof(fs->fs_fsmnt) - 1,	/* max size*/
688			&strsize);			/* real size*/
689	bzero( fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);
690
691	if( mp->mnt_flag & MNT_ROOTFS) {
692		/*
693		 * Root mount; update timestamp in mount structure.
694		 * This will be used by the common root mount code
695		 * to update the system clock.
696		 */
697		mp->mnt_time = fs->fs_time;
698	}
699
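	/*
	 * Remember the superblock's idea of the maximum file size and
	 * clamp the in-core value to just under 2^30 blocks' worth of
	 * bytes; ffs_sbupdate() restores the saved value when the
	 * superblock is written back out.
	 */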
700	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
701	maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1;	/* XXX */
702	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
703		fs->fs_maxfilesize = maxfilesize;		/* XXX */
704	if (ronly == 0) {
705		if ((fs->fs_flags & FS_DOSOFTDEP) &&
706		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
707			free(fs->fs_csp, M_UFSMNT);
708			goto out;
709		}
710		if (fs->fs_snapinum[0] != 0)
711			ffs_snapshot_mount(mp);
712		fs->fs_fmod = 1;
713		fs->fs_clean = 0;
714		(void) ffs_sbupdate(ump, MNT_WAIT);
715	}
716#ifdef UFS_EXTATTR
717#ifdef UFS_EXTATTR_AUTOSTART
718	/*
719	 *
720	 * Auto-starting does the following:
721	 *	- check for /.attribute in the fs, and extattr_start if so
722	 *	- for each file in .attribute, enable that file with
723	 * 	  an attribute of the same name.
724	 * Not clear how to report errors -- probably eat them.
725	 * This would all happen while the file system was busy/not
726	 * available, so would effectively be "atomic".
727	 */
728	(void) ufs_extattr_autostart(mp, p);
729#endif /* !UFS_EXTATTR_AUTOSTART */
730#endif /* !UFS_EXTATTR */
731	return (0);
732out:
733	devvp->v_rdev->si_mountpoint = NULL;
734	if (bp)
735		brelse(bp);
736	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
737	if (ump) {
738		free(ump->um_fs, M_UFSMNT);
739		free(ump, M_UFSMNT);
740		mp->mnt_data = (qaddr_t)0;
741	}
742	return (error);
743}
744
745/*
746 * Sanity checks for old file systems.
747 *
748 * XXX - goes away some day.
749 */
750static int
751ffs_oldfscompat(fs)
752	struct fs *fs;
753{
754
755	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
756	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
757	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
758		fs->fs_nrpos = 8;				/* XXX */
759	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
760#if 0
761		int i;						/* XXX */
762		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
763								/* XXX */
764		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
765		for (i = 0; i < NIADDR; i++) {			/* XXX */
766			sizepb *= NINDIR(fs);			/* XXX */
767			fs->fs_maxfilesize += sizepb;		/* XXX */
768		}						/* XXX */
769#endif
770		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
771		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
772		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
773	}							/* XXX */
774	return (0);
775}
776
777/*
778 * unmount system call
779 */
780int
781ffs_unmount(mp, mntflags, p)
782	struct mount *mp;
783	int mntflags;
784	struct proc *p;
785{
786	register struct ufsmount *ump = VFSTOUFS(mp);
787	register struct fs *fs;
788	int error, flags;
789
790	flags = 0;
791	if (mntflags & MNT_FORCE) {
792		flags |= FORCECLOSE;
793	}
794#ifdef UFS_EXTATTR
795	if ((error = ufs_extattr_stop(mp, p)))
796		if (error != EOPNOTSUPP)
797			printf("ffs_unmount: ufs_extattr_stop returned %d\n",
798			    error);
799	ufs_extattr_uepm_destroy(&ump->um_extattr);
800#endif
801	if (mp->mnt_flag & MNT_SOFTDEP) {
802		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
803			return (error);
804	} else {
805		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
806			return (error);
807	}
808	fs = ump->um_fs;
809	if (fs->fs_ronly == 0) {
810		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
811		error = ffs_sbupdate(ump, MNT_WAIT);
812		if (error) {
813			fs->fs_clean = 0;
814			return (error);
815		}
816	}
817	ump->um_devvp->v_rdev->si_mountpoint = NULL;
818
819	vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0);
820	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
821		NOCRED, p);
822
823	vrele(ump->um_devvp);
824
825	free(fs->fs_csp, M_UFSMNT);
826	free(fs, M_UFSMNT);
827	free(ump, M_UFSMNT);
828	mp->mnt_data = (qaddr_t)0;
829	mp->mnt_flag &= ~MNT_LOCAL;
830	return (error);
831}
832
833/*
834 * Flush out all the files in a filesystem.
835 */
836int
837ffs_flushfiles(mp, flags, p)
838	register struct mount *mp;
839	int flags;
840	struct proc *p;
841{
842	register struct ufsmount *ump;
843	int error;
844
845	ump = VFSTOUFS(mp);
846#ifdef QUOTA
847	if (mp->mnt_flag & MNT_QUOTA) {
848		int i;
849		error = vflush(mp, NULLVP, SKIPSYSTEM|flags);
850		if (error)
851			return (error);
852		for (i = 0; i < MAXQUOTAS; i++) {
853			if (ump->um_quotas[i] == NULLVP)
854				continue;
855			quotaoff(p, mp, i);
856		}
857		/*
858		 * Here we fall through to vflush again to ensure
859		 * that we have gotten rid of all the system vnodes.
860		 */
861	}
862#endif
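	/*
	 * VCOPYONWRITE on the device vnode indicates that snapshots are
	 * active; flush the ordinary vnodes and then tear the snapshots
	 * down before the final flush below.
	 */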
863	if (ump->um_devvp->v_flag & VCOPYONWRITE) {
864		if ((error = vflush(mp, NULL, SKIPSYSTEM | flags)) != 0)
865			return (error);
866		ffs_snapshot_unmount(mp);
867		/*
868		 * Here we fall through to vflush again to ensure
869		 * that we have gotten rid of all the system vnodes.
870		 */
871	}
872        /*
873	 * Flush all the files.
874	 */
875	if ((error = vflush(mp, NULL, flags)) != 0)
876		return (error);
877	/*
878	 * Flush filesystem metadata.
879	 */
880	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
881	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p);
882	VOP_UNLOCK(ump->um_devvp, 0, p);
883	return (error);
884}
885
886/*
887 * Get file system statistics.
888 */
889int
890ffs_statfs(mp, sbp, p)
891	struct mount *mp;
892	register struct statfs *sbp;
893	struct proc *p;
894{
895	register struct ufsmount *ump;
896	register struct fs *fs;
897
898	ump = VFSTOUFS(mp);
899	fs = ump->um_fs;
900	if (fs->fs_magic != FS_MAGIC)
901		panic("ffs_statfs");
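	/*
	 * Block counts are reported in fragments: f_bsize is the
	 * fragment size, f_iosize the preferred I/O (block) size, and
	 * f_bavail is what remains for ordinary users after the
	 * minfree reserve is deducted.
	 */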
902	sbp->f_bsize = fs->fs_fsize;
903	sbp->f_iosize = fs->fs_bsize;
904	sbp->f_blocks = fs->fs_dsize;
905	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
906		fs->fs_cstotal.cs_nffree;
907	sbp->f_bavail = freespace(fs, fs->fs_minfree);
908	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
909	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
910	if (sbp != &mp->mnt_stat) {
911		sbp->f_type = mp->mnt_vfc->vfc_typenum;
912		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
913			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
914		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
915			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
916	}
917	return (0);
918}
919
920/*
921 * Go through the disk queues to initiate sandbagged IO;
922 * go through the inodes to write those that have been modified;
923 * initiate the writing of the super block if it has been modified.
924 *
925 * Note: we are always called with the filesystem marked `MPBUSY'.
926 */
927int
928ffs_sync(mp, waitfor, cred, p)
929	struct mount *mp;
930	int waitfor;
931	struct ucred *cred;
932	struct proc *p;
933{
934	struct vnode *nvp, *vp;
935	struct inode *ip;
936	struct ufsmount *ump = VFSTOUFS(mp);
937	struct fs *fs;
938	int error, count, wait, lockreq, allerror = 0;
939
940	fs = ump->um_fs;
941	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
942		printf("fs = %s\n", fs->fs_fsmnt);
943		panic("ffs_sync: rofs mod");
944	}
945	/*
946	 * Write back each (modified) inode.
947	 */
948	wait = 0;
949	lockreq = LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK;
950	if (waitfor == MNT_WAIT) {
951		wait = 1;
952		lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
953	}
954	mtx_lock(&mntvnode_mtx);
955loop:
956	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
957		/*
958		 * If the vnode that we are about to sync is no longer
959		 * associated with this mount point, start over.
960		 */
961		if (vp->v_mount != mp)
962			goto loop;
963		mtx_lock(&vp->v_interlock);
964		nvp = LIST_NEXT(vp, v_mntvnodes);
965		ip = VTOI(vp);
966		if (vp->v_type == VNON || ((ip->i_flag &
967		     (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
968		     TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
969			mtx_unlock(&vp->v_interlock);
970			continue;
971		}
972		if (vp->v_type != VCHR) {
973			mtx_unlock(&mntvnode_mtx);
974			if ((error = vget(vp, lockreq, p)) != 0) {
975				mtx_lock(&mntvnode_mtx);
976				if (error == ENOENT)
977					goto loop;
978				continue;
979			}
980			if ((error = VOP_FSYNC(vp, cred, waitfor, p)) != 0)
981				allerror = error;
982			VOP_UNLOCK(vp, 0, p);
983			vrele(vp);
984			mtx_lock(&mntvnode_mtx);
985		} else {
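			/*
			 * Special (VCHR) vnodes are not vget()'d or
			 * fsync()'d here; only the inode itself is
			 * pushed back via UFS_UPDATE().
			 */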
986			mtx_unlock(&mntvnode_mtx);
987			mtx_unlock(&vp->v_interlock);
988			UFS_UPDATE(vp, wait);
989			mtx_lock(&mntvnode_mtx);
990		}
991	}
992	mtx_unlock(&mntvnode_mtx);
993	/*
994	 * Force stale file system control information to be flushed.
995	 */
996	if (waitfor == MNT_WAIT) {
997		if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
998			allerror = error;
999		/* Flushed work items may create new vnodes to clean */
1000		if (count) {
1001			mtx_lock(&mntvnode_mtx);
1002			goto loop;
1003		}
1004	}
1005#ifdef QUOTA
1006	qsync(mp);
1007#endif
1008	if (waitfor != MNT_LAZY) {
1009		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
1010		if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p)) != 0)
1011			allerror = error;
1012		VOP_UNLOCK(ump->um_devvp, 0, p);
1013	}
1014	/*
1015	 * Write back modified superblock.
1016	 */
1017	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
1018		allerror = error;
1019	return (allerror);
1020}
1021
1022/*
1023 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1024 * in from disk.  If it is in core, wait for the lock bit to clear, then
1025 * return the inode locked.  Detection and handling of mount points must be
1026 * done by the calling routine.
1027 */
1028static int ffs_inode_hash_lock;
1029/*
1030 * ffs_inode_hash_lock is a variable used to serialize vnode
1031 * allocation and insertion into the hash, especially to avoid
1032 * holding more than one vnode for the same inode in the hash
1033 * table. ffs_inode_hash_lock must hence be tested-and-set or
1034 * cleared atomically, which is accomplished with ffs_inode_hash_mtx.
1035 *
1036 * As vnode allocation may block in MALLOC() and in the zone
1037 * allocator, we also msleep() to give up the CPU if anyone else
1038 * is allocating a vnode. lockmgr is not suitable here because
1039 * someone else may insert the vnode we are trying to allocate
1040 * into the hash table while we sleep, in which case the hash
1041 * table needs to be examined once again after waking up.
1042 *
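 * ffs_inode_hash_lock holds 1 while a thread is allocating a new
 * vnode/inode pair; a waiter sets it to -1 before sleeping so that
 * the allocating thread knows a wakeup() is needed when it clears
 * the lock.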
1043 */
1044static struct mtx ffs_inode_hash_mtx;
1045
1046int
1047ffs_vget(mp, ino, vpp)
1048	struct mount *mp;
1049	ino_t ino;
1050	struct vnode **vpp;
1051{
1052	struct fs *fs;
1053	struct inode *ip;
1054	struct ufsmount *ump;
1055	struct buf *bp;
1056	struct vnode *vp;
1057	dev_t dev;
1058	int error, want_wakeup;
1059
1060	ump = VFSTOUFS(mp);
1061	dev = ump->um_dev;
1062restart:
1063	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1064		return (0);
1065	}
1066
1067	/*
1068	 * Lock out the creation of new entries in the FFS hash table in
1069	 * case getnewvnode() or MALLOC() blocks; otherwise a duplicate
1070	 * vnode for the same inode may be created!
1071	 */
1072	mtx_lock(&ffs_inode_hash_mtx);
1073	if (ffs_inode_hash_lock) {
1074		while (ffs_inode_hash_lock) {
1075			ffs_inode_hash_lock = -1;
1076			msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
1077		}
1078		mtx_unlock(&ffs_inode_hash_mtx);
1079		goto restart;
1080	}
1081	ffs_inode_hash_lock = 1;
1082	mtx_unlock(&ffs_inode_hash_mtx);
1083
1084	/*
1085	 * If this MALLOC() were performed after the getnewvnode(),
1086	 * it might block, leaving a vnode with a NULL v_data to be
1087	 * found by ffs_sync() if a sync happens to fire right then,
1088	 * which will cause a panic because ffs_sync() blindly
1089	 * dereferences vp->v_data (as well it should).
1090	 */
1091	MALLOC(ip, struct inode *, sizeof(struct inode),
1092	    ump->um_malloctype, M_WAITOK);
1093
1094	/* Allocate a new vnode/inode. */
1095	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
1096	if (error) {
1097		/*
1098		 * Do not wake up processes while holding the mutex,
1099		 * otherwise the processes woken up would immediately
1100		 * block on the mutex again.
1101		 */
1102		mtx_lock(&ffs_inode_hash_mtx);
1103		want_wakeup = ffs_inode_hash_lock < 0;
1104		ffs_inode_hash_lock = 0;
1105		mtx_unlock(&ffs_inode_hash_mtx);
1106		if (want_wakeup)
1107			wakeup(&ffs_inode_hash_lock);
1108		*vpp = NULL;
1109		FREE(ip, ump->um_malloctype);
1110		return (error);
1111	}
1112	bzero((caddr_t)ip, sizeof(struct inode));
1113	/*
1114	 * FFS supports lock sharing in the stack of vnodes
1115	 */
1116	vp->v_vnlock = &vp->v_lock;
1117	lockinit(vp->v_vnlock, PINOD, "inode", 0, LK_CANRECURSE);
1118	vp->v_data = ip;
1119	ip->i_vnode = vp;
1120	ip->i_fs = fs = ump->um_fs;
1121	ip->i_dev = dev;
1122	ip->i_number = ino;
1123#ifdef QUOTA
1124	{
1125		int i;
1126		for (i = 0; i < MAXQUOTAS; i++)
1127			ip->i_dquot[i] = NODQUOT;
1128	}
1129#endif
1130	/*
1131	 * Put it onto its hash chain and lock it so that other requests for
1132	 * this inode will block if they arrive while we are sleeping waiting
1133	 * for old data structures to be purged or for the contents of the
1134	 * disk portion of this inode to be read.
1135	 */
1136	ufs_ihashins(ip);
1137
1138	/*
1139	 * Do not wake up processes while holding the mutex,
1140	 * otherwise the processes woken up would immediately
1141	 * block on the mutex again.
1142	 */
1143	mtx_lock(&ffs_inode_hash_mtx);
1144	want_wakeup = ffs_inode_hash_lock < 0;
1145	ffs_inode_hash_lock = 0;
1146	mtx_unlock(&ffs_inode_hash_mtx);
1147	if (want_wakeup)
1148		wakeup(&ffs_inode_hash_lock);
1149
1150	/* Read in the disk contents for the inode, copy into the inode. */
1151	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1152	    (int)fs->fs_bsize, NOCRED, &bp);
1153	if (error) {
1154		/*
1155		 * The inode does not contain anything useful, so it would
1156		 * be misleading to leave it on its hash chain. With mode
1157		 * still zero, it will be unlinked and returned to the free
1158		 * list by vput().
1159		 */
1160		brelse(bp);
1161		vput(vp);
1162		*vpp = NULL;
1163		return (error);
1164	}
1165	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
1166	if (DOINGSOFTDEP(vp))
1167		softdep_load_inodeblock(ip);
1168	else
1169		ip->i_effnlink = ip->i_nlink;
1170	bqrelse(bp);
1171
1172	/*
1173	 * Initialize the vnode from the inode, check for aliases.
1174	 * Note that the underlying vnode may have changed.
1175	 */
1176	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1177	if (error) {
1178		vput(vp);
1179		*vpp = NULL;
1180		return (error);
1181	}
1182	/*
1183	 * Finish inode initialization now that aliasing has been resolved.
1184	 */
1185	ip->i_devvp = ump->um_devvp;
1186	VREF(ip->i_devvp);
1187	/*
1188	 * Set up a generation number for this inode if it does not
1189	 * already have one. This should only happen on old filesystems.
1190	 */
1191	if (ip->i_gen == 0) {
1192		ip->i_gen = random() / 2 + 1;
1193		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
1194			ip->i_flag |= IN_MODIFIED;
1195	}
1196	/*
1197	 * Ensure that uid and gid are correct. This is a temporary
1198	 * fix until fsck has been changed to do the update.
1199	 */
1200	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
1201		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
1202		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
1203	}						/* XXX */
1204
1205	*vpp = vp;
1206	return (0);
1207}
1208
1209/*
1210 * File handle to vnode
1211 *
1212 * Have to be really careful about stale file handles:
1213 * - check that the inode number is valid
1214 * - call ffs_vget() to get the locked inode
1215 * - check for an unallocated inode (i_mode == 0)
1216 * - check that the given client host has export rights and return
1217 *   those rights via exflagsp and credanonp
1218 */
1219int
1220ffs_fhtovp(mp, fhp, vpp)
1221	register struct mount *mp;
1222	struct fid *fhp;
1223	struct vnode **vpp;
1224{
1225	register struct ufid *ufhp;
1226	struct fs *fs;
1227
1228	ufhp = (struct ufid *)fhp;
1229	fs = VFSTOUFS(mp)->um_fs;
1230	if (ufhp->ufid_ino < ROOTINO ||
1231	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1232		return (ESTALE);
1233	return (ufs_fhtovp(mp, ufhp, vpp));
1234}
1235
1236/*
1237 * Vnode pointer to File handle
1238 */
1239/* ARGSUSED */
1240int
1241ffs_vptofh(vp, fhp)
1242	struct vnode *vp;
1243	struct fid *fhp;
1244{
1245	register struct inode *ip;
1246	register struct ufid *ufhp;
1247
1248	ip = VTOI(vp);
1249	ufhp = (struct ufid *)fhp;
1250	ufhp->ufid_len = sizeof(struct ufid);
1251	ufhp->ufid_ino = ip->i_number;
1252	ufhp->ufid_gen = ip->i_gen;
1253	return (0);
1254}
1255
1256/*
1257 * Initialize the filesystem; set up softdep and our hash mutex, then use ufs_init.
1258 */
1259static int
1260ffs_init(vfsp)
1261	struct vfsconf *vfsp;
1262{
1263
1264	softdep_initialize();
1265	mtx_init(&ffs_inode_hash_mtx, "ifsvgt", MTX_DEF);
1266	return (ufs_init(vfsp));
1267}
1268
1269/*
1270 * Write a superblock and associated information back to disk.
1271 */
1272static int
1273ffs_sbupdate(mp, waitfor)
1274	struct ufsmount *mp;
1275	int waitfor;
1276{
1277	register struct fs *dfs, *fs = mp->um_fs;
1278	register struct buf *bp;
1279	int blks;
1280	void *space;
1281	int i, size, error, allerror = 0;
1282
1283	/*
1284	 * First write back the summary information.
1285	 */
1286	blks = howmany(fs->fs_cssize, fs->fs_fsize);
1287	space = fs->fs_csp;
1288	for (i = 0; i < blks; i += fs->fs_frag) {
1289		size = fs->fs_bsize;
1290		if (i + fs->fs_frag > blks)
1291			size = (blks - i) * fs->fs_fsize;
1292		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1293		    size, 0, 0);
1294		bcopy(space, bp->b_data, (u_int)size);
1295		space = (char *)space + size;
1296		if (waitfor != MNT_WAIT)
1297			bawrite(bp);
1298		else if ((error = bwrite(bp)) != 0)
1299			allerror = error;
1300	}
1301	/*
1302	 * Now write back the superblock itself. If any errors occurred
1303	 * up to this point, then fail so that the superblock is not
1304	 * written out marked clean.
1305	 */
1306	if (allerror)
1307		return (allerror);
1308	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
1309	fs->fs_fmod = 0;
1310	fs->fs_time = time_second;
1311	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1312	/* Restore compatibility to old file systems.		   XXX */
1313	dfs = (struct fs *)bp->b_data;				/* XXX */
1314	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
1315		dfs->fs_nrpos = -1;				/* XXX */
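	/*
	 * For superblocks older than FS_44INODEFMT, the five 32-bit
	 * words starting at fs_qbmask are laid out differently on
	 * disk, so rotate them back into the old order before the
	 * block is written.
	 */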
1316	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
1317		int32_t *lp, tmp;				/* XXX */
1318								/* XXX */
1319		lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
1320		tmp = lp[4];					/* XXX */
1321		for (i = 4; i > 0; i--)				/* XXX */
1322			lp[i] = lp[i-1];			/* XXX */
1323		lp[0] = tmp;					/* XXX */
1324	}							/* XXX */
1325	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */
1326	if (waitfor != MNT_WAIT)
1327		bawrite(bp);
1328	else if ((error = bwrite(bp)) != 0)
1329		allerror = error;
1330	return (allerror);
1331}
1332