/*-
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_alloc.c 280258 2015-03-19 13:37:36Z rwatson $");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				  int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct bio *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t	ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}
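
/*
 * Illustrative use (a sketch of the typical call sequence as made from
 * ffs_balloc(), shown here for orientation and not a definition from this
 * file): the caller derives a placement preference from ffs_blkpref_ufs2()
 * and passes it in as bpref, e.g.
 *
 *	pref = ffs_blkpref_ufs2(ip, lbn, 0, (ufs2_daddr_t *)0);
 *	error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, flags,
 *	    cred, &newb);
 */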

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error, gbflags;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	*bpp = 0;
	vp = ITOV(ip);
	fs = ip->i_fs;
	bp = NULL;
	ump = ip->i_ump;
	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		goto nospace;
	}
	if (bprev == 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	UFS_UNLOCK(ump);
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	UFS_LOCK(ump);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
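	/*
	 * Worked example for the thresholds above (illustrative numbers
	 * only): with fs_minfree = 8 and fs_dsize = 1,000,000 fragments,
	 * the SPACE->TIME switch fires once cs_nffree falls to
	 * 1,000,000 * 8 / 200 = 40,000 fragments or fewer, while the
	 * TIME->SPACE switch fires once cs_nffree grows to
	 * 1,000,000 * (8 - 2) / 100 = 60,000 fragments or more.
	 */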
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
			    ip->i_number, vp->v_type, NULL);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	UFS_LOCK(ump);
#endif
nospace:
	/*
	 * no space available
	 */
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		UFS_UNLOCK(ump);
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		UFS_LOCK(ump);
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (bp)
		brelse(bp);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * to the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. Whether
 * the reallocation succeeded is returned. Note that the error return is
 * not reflected back to the user. Rather, the previous block allocation
 * will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");
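
/*
 * Both knobs above are declared CTLFLAG_RW and so are run-time tunables;
 * e.g. (usage sketch from a shell):
 *
 *	sysctl vfs.ffs.doasyncfree=0
 *	sysctl vfs.ffs.doreallocblks=0
 */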

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{

	if (doreallocblks == 0)
		return (ENOSPC);
	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here.  Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSOFTDEP(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1)
		return (ffs_reallocblks_ufs1(ap));
	return (ffs_reallocblks_ufs2(ap));
}

static int
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs1_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	ufs2_daddr_t pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	UFS_LOCK(ump);
	pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
	    len, len, ffs_clusteralloc)) == 0) {
		UFS_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din1->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

static int
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs2_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, do not move anything in it. Indirect blocks are
	 * usually initially laid out in a position between the data
	 * blocks. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	UFS_LOCK(ump);
	pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = ffs_hashalloc(ip, dtog(fs, pref), pref,
	    len, len, ffs_clusteralloc)) == 0) {
		UFS_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din2->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	struct timespec ts;
	struct ufsmount *ump;
	ino_t ino, ipref;
	u_int cg;
	int error, error1, reclaimed;
	static struct timeval lastfail;
	static int curfail;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	ump = pip->i_ump;

	UFS_LOCK(ump);
	reclaimed = 0;
retry:
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of dirs created one after another
	 * in the same cg without intervening files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
					(allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
		    FFSV_FORCEINSMQ);
		ffs_vfree(pvp, ino, mode);
		if (error1 == 0) {
			ip = VTOI(*vpp);
			if (ip->i_mode)
				goto dup_alloc;
			ip->i_flag |= IN_MODIFIED;
			vput(*vpp);
		}
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
dup_alloc:
		printf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	ip->i_flags = 0;
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random() / 2 + 1;
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	ufs_prepare_reclaim(*vpp);
	ip->i_flag = 0;
	(*vpp)->v_vflag = 0;
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		(*vpp)->v_op = &ffs_vnodeops2;
	else
		(*vpp)->v_op = &ffs_vnodeops1;
	return (0);
noinodes:
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Compute various limits used for the optimal
	 * allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
				    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;
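	/*
	 * Worked example (illustrative numbers only, assuming the common
	 * defaults fs_avgfilesize = 16384 and fs_avgfpdir = 64): dirsize
	 * starts at 16384 * 64 = 1048576 bytes. If avgbfree is 1000
	 * blocks of 32768 bytes, maxcontigdirs becomes
	 * min(1000 * 32768 / 1048576, 255) = 31, possibly reduced further
	 * to fs_ipg / 64 by the cap above.
	 */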

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 *
	 * We are trying to find a suitable cylinder group nearby
	 * our preferred cylinder group to place a new directory.
	 * We scan from our preferred cylinder group forward looking
	 * for a cylinder group that meets our criterion. If we get
	 * to the final cylinder group and do not find anything,
	 * we start scanning backwards from our preferred cylinder
	 * group. The ideal would be to alternate looking forward
	 * and backward, but that is just too complex to code for
	 * the gain it would get. The most likely place where the
	 * backward scan would take effect is when we start near
	 * the end of the filesystem and do not find anything from
	 * where we are to the end. In that case, scanning backward
	 * will likely find us a suitable cylinder group much closer
	 * to our desired location than if we were to start scanning
	 * forward from the beginning of the filesystem.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks and the next fs_maxbpg blocks. Each additional section
 * contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. The first indirect is allocated immediately following the last
 * direct block and the data blocks for the first indirect immediately
 * follow it.
 *
 * If no blocks have been allocated in any other section, the indirect
 * block(s) are allocated in the same cylinder group as the file's inode
 * in an area reserved immediately following the inode blocks. The policy
 * for the data blocks is to place them in a cylinder group with a greater
 * than average number of free blocks. An appropriate cylinder group is
 * found by using a rotor that sweeps the cylinder groups. When a new group
 * of blocks is needed, the sweep begins in the cylinder group following
 * the cylinder group from which the previous allocation was made. The
 * sweep continues until a cylinder group with greater than the average
 * number of free blocks is found. If the allocation is for the first
 * block in an indirect block or the previous block is a hole, then the
 * information on the previous allocation is unavailable; here a best
 * guess is made based on the logical block number being allocated.
 *
 * If a section is already partially allocated, the policy is to
 * allocate blocks contiguously within the section if possible.
 */
ufs2_daddr_t
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs1_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
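	/*
	 * For example (illustrative, following the convention described
	 * above): when the single indirect block itself is being allocated
	 * for a file, the caller passes indx == -1; the first data block
	 * mapped by that indirect then arrives with indx == 0 and
	 * lbn == NDADDR, which the lbn == NDADDR case below handles.
	 */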
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din1->di_db[NDADDR - 1] != 0)
			pref = ip->i_din1->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din1->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Same as above, but for UFS2
 */
ufs2_daddr_t
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs2_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din2->di_db[NDADDR - 1] != 0)
			pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din2->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held.  Will release the lock on success
 * and return with it held on failure.
 */
/*VARARGS5*/
static ufs2_daddr_t
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t pref;
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
{
	struct fs *fs;
	ufs2_daddr_t result;
	u_int i, icg = cg;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
#ifdef INVARIANTS
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
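	/*
	 * Illustrative probe order (a sketch, assuming fs_ncg = 16 and a
	 * starting cg of 5): the loop below visits cgs 6, 8, 12 and then
	 * 4 (20 mod 16), i.e. offsets 1, 3, 7 and 15 from the original cg.
	 */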
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs2_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bprev;
	int osize, nsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int nffree;
	long bno;
	int frags, bbase;
	int i, error;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i))
			goto fail;
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
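	/*
	 * For example (illustrative): growing a 2-fragment piece to 4
	 * fragments when a 5-fragment free run follows it clears 2 bits
	 * in the free map, decrements cg_frsum[5] for the run that was
	 * consumed, and increments cg_frsum[3] for the 3 free fragments
	 * that remain after the extension.
	 */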
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(blksfree, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		nffree++;
	}
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= nffree;
	fs->fs_cs(fs, cg).cs_nffree -= nffree;
	fs->fs_fmod = 1;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
		    frags, numfrags(fs, osize));
	bdwrite(bp);
	return (bprev);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs2_daddr_t
ffs_alloccg(ip, cg, bpref, size, rsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int size;
	int rsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	ufs1_daddr_t bno;
	ufs2_daddr_t blkno;
	int i, allocsiz, error, frags;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0)
			goto fail;
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	KASSERT(size == rsize,
	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0)
		goto fail;
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	blkno = cgbase(fs, cg) + bno;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
	bdwrite(bp);
	return (blkno);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);
}
1691
1692/*
1693 * Allocate a block in a cylinder group.
1694 *
1695 * This algorithm implements the following policy:
1696 *   1) allocate the requested block.
1697 *   2) allocate a rotationally optimal block in the same cylinder.
1698 *   3) allocate the next available block on the block rotor for the
1699 *      specified cylinder group.
1700 * Note that this routine only allocates fs_bsize blocks; these
1701 * blocks may be fragmented by the routine that allocates them.
1702 */
1703static ufs2_daddr_t
1704ffs_alloccgblk(ip, bp, bpref, size)
1705	struct inode *ip;
1706	struct buf *bp;
1707	ufs2_daddr_t bpref;
1708	int size;
1709{
1710	struct fs *fs;
1711	struct cg *cgp;
1712	struct ufsmount *ump;
1713	ufs1_daddr_t bno;
1714	ufs2_daddr_t blkno;
1715	u_int8_t *blksfree;
1716	int i, cgbpref;
1717
1718	fs = ip->i_fs;
1719	ump = ip->i_ump;
1720	mtx_assert(UFS_MTX(ump), MA_OWNED);
1721	cgp = (struct cg *)bp->b_data;
1722	blksfree = cg_blksfree(cgp);
1723	if (bpref == 0) {
1724		bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1725	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1726		/* map bpref to correct zone in this cg */
1727		if (bpref < cgdata(fs, cgbpref))
1728			bpref = cgmeta(fs, cgp->cg_cgx);
1729		else
1730			bpref = cgdata(fs, cgp->cg_cgx);
1731	}
1732	/*
1733	 * if the requested block is available, use it
1734	 */
1735	bno = dtogd(fs, blknum(fs, bpref));
1736	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1737		goto gotit;
1738	/*
1739	 * Take the next available block in this cylinder group.
1740	 */
1741	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1742	if (bno < 0)
1743		return (0);
1744	/* Update cg_rotor only if allocated from the data zone */
1745	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1746		cgp->cg_rotor = bno;
1747gotit:
1748	blkno = fragstoblks(fs, bno);
1749	ffs_clrblock(fs, blksfree, (long)blkno);
1750	ffs_clusteracct(fs, cgp, blkno, -1);
1751	cgp->cg_cs.cs_nbfree--;
1752	fs->fs_cstotal.cs_nbfree--;
1753	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1754	fs->fs_fmod = 1;
1755	blkno = cgbase(fs, cgp->cg_cgx) + bno;
1756	/*
1757	 * If the caller didn't want the whole block, free the excess frags here.
1758	 */
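	/*
	 * Accounting sketch (illustrative numbers, fs_frag == 8): if the
	 * caller asked for 3 frags, the 5 trailing frags of the block are
	 * marked free again, cs_nffree rises by 5 in the cg, the summary
	 * array and the filesystem totals, and cg_frsum[5] gains one run
	 * of that size.
	 */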
1759	size = numfrags(fs, size);
1760	if (size != fs->fs_frag) {
1761		bno = dtogd(fs, blkno);
1762		for (i = size; i < fs->fs_frag; i++)
1763			setbit(blksfree, bno + i);
1764		i = fs->fs_frag - size;
1765		cgp->cg_cs.cs_nffree += i;
1766		fs->fs_cstotal.cs_nffree += i;
1767		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1768		fs->fs_fmod = 1;
1769		cgp->cg_frsum[i]++;
1770	}
1771	/* XXX Fixme. */
1772	UFS_UNLOCK(ump);
1773	if (DOINGSOFTDEP(ITOV(ip)))
1774		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
1775		    size, 0);
1776	UFS_LOCK(ump);
1777	return (blkno);
1778}
1779
1780/*
1781 * Determine whether a cluster can be allocated.
1782 *
1783 * We do not currently check for optimal rotational layout if there
1784 * are multiple choices in the same cylinder group. Instead we just
1785 * take the first one that we find following bpref.
1786 */
1787static ufs2_daddr_t
1788ffs_clusteralloc(ip, cg, bpref, len, unused)
1789	struct inode *ip;
1790	u_int cg;
1791	ufs2_daddr_t bpref;
1792	int len;
1793	int unused;
1794{
1795	struct fs *fs;
1796	struct cg *cgp;
1797	struct buf *bp;
1798	struct ufsmount *ump;
1799	int i, run, bit, map, got;
1800	ufs2_daddr_t bno;
1801	u_char *mapp;
1802	int32_t *lp;
1803	u_int8_t *blksfree;
1804
1805	fs = ip->i_fs;
1806	ump = ip->i_ump;
1807	if (fs->fs_maxcluster[cg] < len)
1808		return (0);
1809	UFS_UNLOCK(ump);
1810	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
1811	    NOCRED, &bp))
1812		goto fail_lock;
1813	cgp = (struct cg *)bp->b_data;
1814	if (!cg_chkmagic(cgp))
1815		goto fail_lock;
1816	bp->b_xflags |= BX_BKGRDWRITE;
1817	/*
1818	 * Check to see if a cluster of the needed size (or bigger) is
1819	 * available in this cylinder group.
1820	 */
1821	lp = &cg_clustersum(cgp)[len];
1822	for (i = len; i <= fs->fs_contigsumsize; i++)
1823		if (*lp++ > 0)
1824			break;
1825	if (i > fs->fs_contigsumsize) {
1826		/*
1827		 * This is the first time looking for a cluster in this
1828		 * cylinder group. Update the cluster summary information
1829		 * to reflect the true maximum sized cluster so that
1830		 * future cluster allocation requests can avoid reading
1831		 * the cylinder group map only to find no clusters.
1832		 */
1833		lp = &cg_clustersum(cgp)[len - 1];
1834		for (i = len - 1; i > 0; i--)
1835			if (*lp-- > 0)
1836				break;
1837		UFS_LOCK(ump);
1838		fs->fs_maxcluster[cg] = i;
1839		goto fail;
1840	}
1841	/*
1842	 * Search the cluster map to find a big enough cluster.
1843	 * We take the first one that we find, even if it is larger
1844	 * than we need, as we prefer to get one close to the previous
1845	 * block allocation. We do not search before the current
1846	 * preference point as we do not want to allocate a block
1847	 * that is allocated before the previous one (as we will
1848	 * then have to wait for another pass of the elevator
1849	 * algorithm before it will be read). We prefer to fail and
1850	 * be recalled to try an allocation in the next cylinder group.
1851	 */
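	/*
	 * Scan sketch (illustrative): with len == 3, free blocks at 5-6,
	 * an allocated block at 7 and free blocks at 8-10, the run
	 * counter below resets at block 7 and reaches 3 at got == 10, so
	 * the cluster handed out starts at got - run + 1 == 8.
	 */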
1852	if (dtog(fs, bpref) != cg)
1853		bpref = cgdata(fs, cg);
1854	else
1855		bpref = blknum(fs, bpref);
1856	bpref = fragstoblks(fs, dtogd(fs, bpref));
1857	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1858	map = *mapp++;
1859	bit = 1 << (bpref % NBBY);
1860	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1861		if ((map & bit) == 0) {
1862			run = 0;
1863		} else {
1864			run++;
1865			if (run == len)
1866				break;
1867		}
1868		if ((got & (NBBY - 1)) != (NBBY - 1)) {
1869			bit <<= 1;
1870		} else {
1871			map = *mapp++;
1872			bit = 1;
1873		}
1874	}
1875	if (got >= cgp->cg_nclusterblks)
1876		goto fail_lock;
1877	/*
1878	 * Allocate the cluster that we have found.
1879	 */
1880	blksfree = cg_blksfree(cgp);
1881	for (i = 1; i <= len; i++)
1882		if (!ffs_isblock(fs, blksfree, got - run + i))
1883			panic("ffs_clusteralloc: map mismatch");
1884	bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1885	if (dtog(fs, bno) != cg)
1886		panic("ffs_clusteralloc: allocated out of group");
1887	len = blkstofrags(fs, len);
1888	UFS_LOCK(ump);
1889	for (i = 0; i < len; i += fs->fs_frag)
1890		if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
1891			panic("ffs_clusteralloc: lost block");
1892	ACTIVECLEAR(fs, cg);
1893	UFS_UNLOCK(ump);
1894	bdwrite(bp);
1895	return (bno);
1896
1897fail_lock:
1898	UFS_LOCK(ump);
1899fail:
1900	brelse(bp);
1901	return (0);
1902}
1903
1904static inline struct buf *
1905getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
1906{
1907	struct fs *fs;
1908
1909	fs = ip->i_fs;
1910	return (getblk(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs,
1911	    cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
1912	    gbflags));
1913}
1914
1915/*
1916 * Determine whether an inode can be allocated.
1917 *
1918 * Check to see if an inode is available, and if it is,
1919 * allocate it using the following policy:
1920 *   1) allocate the requested inode.
1921 *   2) allocate the next available inode after the requested
1922 *      inode in the specified cylinder group.
1923 */
1924static ufs2_daddr_t
1925ffs_nodealloccg(ip, cg, ipref, mode, unused)
1926	struct inode *ip;
1927	u_int cg;
1928	ufs2_daddr_t ipref;
1929	int mode;
1930	int unused;
1931{
1932	struct fs *fs;
1933	struct cg *cgp;
1934	struct buf *bp, *ibp;
1935	struct ufsmount *ump;
1936	u_int8_t *inosused, *loc;
1937	struct ufs2_dinode *dp2;
1938	int error, start, len, i;
1939	u_int32_t old_initediblk;
1940
1941	fs = ip->i_fs;
1942	ump = ip->i_ump;
1943check_nifree:
1944	if (fs->fs_cs(fs, cg).cs_nifree == 0)
1945		return (0);
1946	UFS_UNLOCK(ump);
1947	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1948		(int)fs->fs_cgsize, NOCRED, &bp);
1949	if (error) {
1950		brelse(bp);
1951		UFS_LOCK(ump);
1952		return (0);
1953	}
1954	cgp = (struct cg *)bp->b_data;
1955restart:
1956	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
1957		brelse(bp);
1958		UFS_LOCK(ump);
1959		return (0);
1960	}
1961	bp->b_xflags |= BX_BKGRDWRITE;
1962	inosused = cg_inosused(cgp);
1963	if (ipref) {
1964		ipref %= fs->fs_ipg;
1965		if (isclr(inosused, ipref))
1966			goto gotit;
1967	}
1968	start = cgp->cg_irotor / NBBY;
1969	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
1970	loc = memcchr(&inosused[start], 0xff, len);
1971	if (loc == NULL) {
1972		len = start + 1;
1973		start = 0;
1974		loc = memcchr(&inosused[start], 0xff, len);
1975		if (loc == NULL) {
1976			printf("cg = %u, irotor = %ld, fs = %s\n",
1977			    cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
1978			panic("ffs_nodealloccg: map corrupted");
1979			/* NOTREACHED */
1980		}
1981	}
1982	ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
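	/*
	 * Example (illustrative): if memcchr() stopped at inosused[3]
	 * with the value 0xdf (bit 5 clear), then ~*loc has only bit 5
	 * set in its low byte, ffs() returns 6, and
	 * ipref = 3 * 8 + 6 - 1 = 29, the first free inode at or after
	 * the rotor.
	 */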
1983gotit:
1984	/*
1985	 * Check to see if we need to initialize more inodes.
1986	 */
1987	if (fs->fs_magic == FS_UFS2_MAGIC &&
1988	    ipref + INOPB(fs) > cgp->cg_initediblk &&
1989	    cgp->cg_initediblk < cgp->cg_niblk) {
1990		old_initediblk = cgp->cg_initediblk;
1991
1992		/*
1993		 * Release the cylinder group buffer before writing the
1994		 * initialized inode block.  Entering babarrierwrite()
1995		 * while still holding the cylinder group buffer lock
1996		 * causes a lock order violation between that lock and
1997		 * snaplk.
1998		 *
1999		 * Another thread can decide to initialize the same
2000		 * inode block, but whichever thread first gets the
2001		 * cylinder group lock after writing the newly
2002		 * allocated inode block will update it and the other
2003		 * will realize that it has lost and leave the
2004		 * cylinder group unchanged.
2005		 */
2006		ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2007		brelse(bp);
2008		if (ibp == NULL) {
2009			/*
2010			 * The inode block buffer is already owned by
2011			 * another thread, which must initialize it.
2012			 * Having dropped the cg buffer lock above,
2013			 * wait on the inode block buffer so the other
2014			 * thread can finish its updates, then retry.
2015			 */
2016			ibp = getinobuf(ip, cg, old_initediblk, 0);
2017			brelse(ibp);
2018			UFS_LOCK(ump);
2019			goto check_nifree;
2020		}
2021		bzero(ibp->b_data, (int)fs->fs_bsize);
2022		dp2 = (struct ufs2_dinode *)(ibp->b_data);
2023		for (i = 0; i < INOPB(fs); i++) {
2024			dp2->di_gen = arc4random() / 2 + 1;
2025			dp2++;
2026		}
2027		/*
2028		 * Rather than adding a soft updates dependency to ensure
2029		 * that the new inode block is written before it is claimed
2030		 * by the cylinder group map, we just do a barrier write
2031		 * here. The barrier write will ensure that the inode block
2032		 * gets written before the updated cylinder group map can be
2033		 * written. The barrier write should only slow down bulk
2034		 * loading of newly created filesystems.
2035		 */
2036		babarrierwrite(ibp);
2037
2038		/*
2039		 * After the inode block is written, try to update the
2040		 * cg initediblk pointer.  If another thread beat us
2041		 * to it, then leave it unchanged as the other thread
2042		 * has already set it correctly.
2043		 */
2044		error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
2045		    (int)fs->fs_cgsize, NOCRED, &bp);
2046		UFS_LOCK(ump);
2047		ACTIVECLEAR(fs, cg);
2048		UFS_UNLOCK(ump);
2049		if (error != 0) {
2050			brelse(bp);
2051			UFS_LOCK(ump);	/* failed allocators return 0 locked */
2052			return (0);
2053		}
2053		cgp = (struct cg *)bp->b_data;
2054		if (cgp->cg_initediblk == old_initediblk)
2055			cgp->cg_initediblk += INOPB(fs);
2056		goto restart;
2057	}
2058	cgp->cg_old_time = cgp->cg_time = time_second;
2059	cgp->cg_irotor = ipref;
2060	UFS_LOCK(ump);
2061	ACTIVECLEAR(fs, cg);
2062	setbit(inosused, ipref);
2063	cgp->cg_cs.cs_nifree--;
2064	fs->fs_cstotal.cs_nifree--;
2065	fs->fs_cs(fs, cg).cs_nifree--;
2066	fs->fs_fmod = 1;
2067	if ((mode & IFMT) == IFDIR) {
2068		cgp->cg_cs.cs_ndir++;
2069		fs->fs_cstotal.cs_ndir++;
2070		fs->fs_cs(fs, cg).cs_ndir++;
2071	}
2072	UFS_UNLOCK(ump);
2073	if (DOINGSOFTDEP(ITOV(ip)))
2074		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2075	bdwrite(bp);
2076	return ((ino_t)(cg * fs->fs_ipg + ipref));
2077}
2078
2079/*
2080 * Free a block or fragment.
2081 *
2082 * The specified block or fragment is placed back in the
2083 * free map. If a fragment is deallocated, a possible
2084 * block reassembly is checked.
2085 */
2086static void
2087ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2088	struct ufsmount *ump;
2089	struct fs *fs;
2090	struct vnode *devvp;
2091	ufs2_daddr_t bno;
2092	long size;
2093	ino_t inum;
2094	struct workhead *dephd;
2095{
2096	struct mount *mp;
2097	struct cg *cgp;
2098	struct buf *bp;
2099	ufs1_daddr_t fragno, cgbno;
2100	ufs2_daddr_t cgblkno;
2101	int i, blk, frags, bbase;
2102	u_int cg;
2103	u_int8_t *blksfree;
2104	struct cdev *dev;
2105
2106	cg = dtog(fs, bno);
2107	if (devvp->v_type == VREG) {
2108		/* devvp is a snapshot */
2109		dev = VTOI(devvp)->i_devvp->v_rdev;
2110		cgblkno = fragstoblks(fs, cgtod(fs, cg));
2111	} else {
2112		/* devvp is a normal disk device */
2113		dev = devvp->v_rdev;
2114		cgblkno = fsbtodb(fs, cgtod(fs, cg));
2115		ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2116	}
2117#ifdef INVARIANTS
2118	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2119	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2120		printf("dev = %s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2121		    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2122		    size, fs->fs_fsmnt);
2123		panic("ffs_blkfree_cg: bad size");
2124	}
2125#endif
2126	if ((u_int)bno >= fs->fs_size) {
2127		printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2128		    (u_long)inum);
2129		ffs_fserr(fs, inum, "bad block");
2130		return;
2131	}
2132	if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2133		brelse(bp);
2134		return;
2135	}
2136	cgp = (struct cg *)bp->b_data;
2137	if (!cg_chkmagic(cgp)) {
2138		brelse(bp);
2139		return;
2140	}
2141	bp->b_xflags |= BX_BKGRDWRITE;
2142	cgp->cg_old_time = cgp->cg_time = time_second;
2143	cgbno = dtogd(fs, bno);
2144	blksfree = cg_blksfree(cgp);
2145	UFS_LOCK(ump);
2146	if (size == fs->fs_bsize) {
2147		fragno = fragstoblks(fs, cgbno);
2148		if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2149			if (devvp->v_type == VREG) {
2150				UFS_UNLOCK(ump);
2151				/* devvp is a snapshot */
2152				brelse(bp);
2153				return;
2154			}
2155			printf("dev = %s, block = %jd, fs = %s\n",
2156			    devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2157			panic("ffs_blkfree_cg: freeing free block");
2158		}
2159		ffs_setblock(fs, blksfree, fragno);
2160		ffs_clusteracct(fs, cgp, fragno, 1);
2161		cgp->cg_cs.cs_nbfree++;
2162		fs->fs_cstotal.cs_nbfree++;
2163		fs->fs_cs(fs, cg).cs_nbfree++;
2164	} else {
2165		bbase = cgbno - fragnum(fs, cgbno);
2166		/*
2167		 * decrement the counts associated with the old frags
2168		 */
2169		blk = blkmap(fs, blksfree, bbase);
2170		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2171		/*
2172		 * deallocate the fragment
2173		 */
2174		frags = numfrags(fs, size);
2175		for (i = 0; i < frags; i++) {
2176			if (isset(blksfree, cgbno + i)) {
2177				printf("dev = %s, block = %jd, fs = %s\n",
2178				    devtoname(dev), (intmax_t)(bno + i),
2179				    fs->fs_fsmnt);
2180				panic("ffs_blkfree_cg: freeing free frag");
2181			}
2182			setbit(blksfree, cgbno + i);
2183		}
2184		cgp->cg_cs.cs_nffree += i;
2185		fs->fs_cstotal.cs_nffree += i;
2186		fs->fs_cs(fs, cg).cs_nffree += i;
2187		/*
2188		 * add back in counts associated with the new frags
2189		 */
2190		blk = blkmap(fs, blksfree, bbase);
2191		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2192		/*
2193		 * if a complete block has been reassembled, account for it
2194		 */
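		/*
		 * Illustrative numbers (fs_frag == 8): freeing the last 2
		 * missing frags of a block whose other 6 are already free
		 * first adds 2 to cs_nffree above; the reassembly below
		 * then subtracts the full 8 and credits one free block,
		 * a net change of -6 frags and +1 block.
		 */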
2195		fragno = fragstoblks(fs, bbase);
2196		if (ffs_isblock(fs, blksfree, fragno)) {
2197			cgp->cg_cs.cs_nffree -= fs->fs_frag;
2198			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2199			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2200			ffs_clusteracct(fs, cgp, fragno, 1);
2201			cgp->cg_cs.cs_nbfree++;
2202			fs->fs_cstotal.cs_nbfree++;
2203			fs->fs_cs(fs, cg).cs_nbfree++;
2204		}
2205	}
2206	fs->fs_fmod = 1;
2207	ACTIVECLEAR(fs, cg);
2208	UFS_UNLOCK(ump);
2209	mp = UFSTOVFS(ump);
2210	if (MOUNTEDSOFTDEP(mp) && devvp->v_type != VREG)
2211		softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2212		    numfrags(fs, size), dephd);
2213	bdwrite(bp);
2214}
2215
2216TASKQUEUE_DEFINE_THREAD(ffs_trim);
2217
2218struct ffs_blkfree_trim_params {
2219	struct task task;
2220	struct ufsmount *ump;
2221	struct vnode *devvp;
2222	ufs2_daddr_t bno;
2223	long size;
2224	ino_t inum;
2225	struct workhead *pdephd;
2226	struct workhead dephd;
2227};
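/*
 * Deferred-free pipeline (a summary of the code below): ffs_blkfree()
 * queues a BIO_DELETE whose bio_done routine,
 * ffs_blkfree_trim_completed(), enqueues ffs_blkfree_trim_task() on
 * the ffs_trim taskqueue; only then does ffs_blkfree_cg() mark the
 * block free and vn_finished_secondary_write() release the write
 * reference taken in ffs_blkfree().
 */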
2228
2229static void
2230ffs_blkfree_trim_task(ctx, pending)
2231	void *ctx;
2232	int pending;
2233{
2234	struct ffs_blkfree_trim_params *tp;
2235
2236	tp = ctx;
2237	ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
2238	    tp->inum, tp->pdephd);
2239	vn_finished_secondary_write(UFSTOVFS(tp->ump));
2240	free(tp, M_TEMP);
2241}
2242
2243static void
2244ffs_blkfree_trim_completed(bip)
2245	struct bio *bip;
2246{
2247	struct ffs_blkfree_trim_params *tp;
2248
2249	tp = bip->bio_caller2;
2250	g_destroy_bio(bip);
2251	TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2252	taskqueue_enqueue(taskqueue_ffs_trim, &tp->task);
2253}
2254
2255void
2256ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
2257	struct ufsmount *ump;
2258	struct fs *fs;
2259	struct vnode *devvp;
2260	ufs2_daddr_t bno;
2261	long size;
2262	ino_t inum;
2263	enum vtype vtype;
2264	struct workhead *dephd;
2265{
2266	struct mount *mp;
2267	struct bio *bip;
2268	struct ffs_blkfree_trim_params *tp;
2269
2270	/*
2271	 * Check to see if a snapshot wants to claim the block.  The
2272	 * block is turned over to a snapshot only when devvp is a
2273	 * normal disk device (not itself a snapshot), it has snapshots
2274	 * associated with it, and one of those snapshots claims it.
2275	 */
2276	if (devvp->v_type != VREG &&
2277	    (devvp->v_vflag & VV_COPYONWRITE) &&
2278	    ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2279		return;
2280	}
2281	/*
2282	 * Nothing to delay if TRIM is disabled, or the operation is
2283	 * performed on a snapshot.
2284	 */
2285	if (!ump->um_candelete || devvp->v_type == VREG) {
2286		ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2287		return;
2288	}
2289
2290	/*
2291	 * Postpone setting the free bit in the cg bitmap until the
2292	 * BIO_DELETE is completed.  Otherwise, due to disk queue
2293	 * reordering, TRIM might be issued after we reuse the block
2294	 * and write some new data into it.
2295	 */
2296	tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
2297	tp->ump = ump;
2298	tp->devvp = devvp;
2299	tp->bno = bno;
2300	tp->size = size;
2301	tp->inum = inum;
2302	if (dephd != NULL) {
2303		LIST_INIT(&tp->dephd);
2304		LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
2305		tp->pdephd = &tp->dephd;
2306	} else
2307		tp->pdephd = NULL;
2308
2309	bip = g_alloc_bio();
2310	bip->bio_cmd = BIO_DELETE;
2311	bip->bio_offset = dbtob(fsbtodb(fs, bno));
2312	bip->bio_done = ffs_blkfree_trim_completed;
2313	bip->bio_length = size;
2314	bip->bio_caller2 = tp;
2315
2316	mp = UFSTOVFS(ump);
2317	vn_start_secondary_write(NULL, &mp, 0);
2318	g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private);
2319}
2320
2321#ifdef INVARIANTS
2322/*
2323 * Verify allocation of a block or fragment. Returns true if block or
2324 * fragment is allocated, false if it is free.
2325 */
2326static int
2327ffs_checkblk(ip, bno, size)
2328	struct inode *ip;
2329	ufs2_daddr_t bno;
2330	long size;
2331{
2332	struct fs *fs;
2333	struct cg *cgp;
2334	struct buf *bp;
2335	ufs1_daddr_t cgbno;
2336	int i, error, frags, free;
2337	u_int8_t *blksfree;
2338
2339	fs = ip->i_fs;
2340	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2341		printf("bsize = %ld, size = %ld, fs = %s\n",
2342		    (long)fs->fs_bsize, size, fs->fs_fsmnt);
2343		panic("ffs_checkblk: bad size");
2344	}
2345	if ((u_int)bno >= fs->fs_size)
2346		panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2347	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
2348		(int)fs->fs_cgsize, NOCRED, &bp);
2349	if (error)
2350		panic("ffs_checkblk: cg bread failed");
2351	cgp = (struct cg *)bp->b_data;
2352	if (!cg_chkmagic(cgp))
2353		panic("ffs_checkblk: cg magic mismatch");
2354	bp->b_xflags |= BX_BKGRDWRITE;
2355	blksfree = cg_blksfree(cgp);
2356	cgbno = dtogd(fs, bno);
2357	if (size == fs->fs_bsize) {
2358		free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2359	} else {
2360		frags = numfrags(fs, size);
2361		for (free = 0, i = 0; i < frags; i++)
2362			if (isset(blksfree, cgbno + i))
2363				free++;
2364		if (free != 0 && free != frags)
2365			panic("ffs_checkblk: partially free fragment");
2366	}
2367	brelse(bp);
2368	return (!free);
2369}
2370#endif /* INVARIANTS */
2371
2372/*
2373 * Free an inode.
2374 */
2375int
2376ffs_vfree(pvp, ino, mode)
2377	struct vnode *pvp;
2378	ino_t ino;
2379	int mode;
2380{
2381	struct inode *ip;
2382
2383	if (DOINGSOFTDEP(pvp)) {
2384		softdep_freefile(pvp, ino, mode);
2385		return (0);
2386	}
2387	ip = VTOI(pvp);
2388	return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode,
2389	    NULL));
2390}
2391
2392/*
2393 * Do the actual free operation.
2394 * The specified inode is placed back in the free map.
2395 */
2396int
2397ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2398	struct ufsmount *ump;
2399	struct fs *fs;
2400	struct vnode *devvp;
2401	ino_t ino;
2402	int mode;
2403	struct workhead *wkhd;
2404{
2405	struct cg *cgp;
2406	struct buf *bp;
2407	ufs2_daddr_t cgbno;
2408	int error;
2409	u_int cg;
2410	u_int8_t *inosused;
2411	struct cdev *dev;
2412
2413	cg = ino_to_cg(fs, ino);
2414	if (devvp->v_type == VREG) {
2415		/* devvp is a snapshot */
2416		dev = VTOI(devvp)->i_devvp->v_rdev;
2417		cgbno = fragstoblks(fs, cgtod(fs, cg));
2418	} else {
2419		/* devvp is a normal disk device */
2420		dev = devvp->v_rdev;
2421		cgbno = fsbtodb(fs, cgtod(fs, cg));
2422	}
2423	if (ino >= fs->fs_ipg * fs->fs_ncg)
2424		panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2425		    devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2426	if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
2427		brelse(bp);
2428		return (error);
2429	}
2430	cgp = (struct cg *)bp->b_data;
2431	if (!cg_chkmagic(cgp)) {
2432		brelse(bp);
2433		return (0);
2434	}
2435	bp->b_xflags |= BX_BKGRDWRITE;
2436	cgp->cg_old_time = cgp->cg_time = time_second;
2437	inosused = cg_inosused(cgp);
2438	ino %= fs->fs_ipg;
2439	if (isclr(inosused, ino)) {
2440		printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2441		    (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
2442		if (fs->fs_ronly == 0)
2443			panic("ffs_freefile: freeing free inode");
2444	}
2445	clrbit(inosused, ino);
2446	if (ino < cgp->cg_irotor)
2447		cgp->cg_irotor = ino;
2448	cgp->cg_cs.cs_nifree++;
2449	UFS_LOCK(ump);
2450	fs->fs_cstotal.cs_nifree++;
2451	fs->fs_cs(fs, cg).cs_nifree++;
2452	if ((mode & IFMT) == IFDIR) {
2453		cgp->cg_cs.cs_ndir--;
2454		fs->fs_cstotal.cs_ndir--;
2455		fs->fs_cs(fs, cg).cs_ndir--;
2456	}
2457	fs->fs_fmod = 1;
2458	ACTIVECLEAR(fs, cg);
2459	UFS_UNLOCK(ump);
2460	if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type != VREG)
2461		softdep_setup_inofree(UFSTOVFS(ump), bp,
2462		    ino + cg * fs->fs_ipg, wkhd);
2463	bdwrite(bp);
2464	return (0);
2465}
2466
2467/*
2468 * Check to see if a file is free.
2469 */
2470int
2471ffs_checkfreefile(fs, devvp, ino)
2472	struct fs *fs;
2473	struct vnode *devvp;
2474	ino_t ino;
2475{
2476	struct cg *cgp;
2477	struct buf *bp;
2478	ufs2_daddr_t cgbno;
2479	int ret;
2480	u_int cg;
2481	u_int8_t *inosused;
2482
2483	cg = ino_to_cg(fs, ino);
2484	if (devvp->v_type == VREG) {
2485		/* devvp is a snapshot */
2486		cgbno = fragstoblks(fs, cgtod(fs, cg));
2487	} else {
2488		/* devvp is a normal disk device */
2489		cgbno = fsbtodb(fs, cgtod(fs, cg));
2490	}
2491	if (ino >= fs->fs_ipg * fs->fs_ncg)
2492		return (1);
2493	if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2494		brelse(bp);
2495		return (1);
2496	}
2497	cgp = (struct cg *)bp->b_data;
2498	if (!cg_chkmagic(cgp)) {
2499		brelse(bp);
2500		return (1);
2501	}
2502	inosused = cg_inosused(cgp);
2503	ino %= fs->fs_ipg;
2504	ret = isclr(inosused, ino);
2505	brelse(bp);
2506	return (ret);
2507}
2508
2509/*
2510 * Find a block of the specified size in the specified cylinder group.
2511 *
2512	 * It is a panic if a request is made to find a block when none
2513	 * are available.
2514 */
2515static ufs1_daddr_t
2516ffs_mapsearch(fs, cgp, bpref, allocsiz)
2517	struct fs *fs;
2518	struct cg *cgp;
2519	ufs2_daddr_t bpref;
2520	int allocsiz;
2521{
2522	ufs1_daddr_t bno;
2523	int start, len, loc, i;
2524	int blk, field, subfield, pos;
2525	u_int8_t *blksfree;
2526
2527	/*
2528	 * find the fragment by searching through the free block
2529	 * map for an appropriate bit pattern
2530	 */
2531	if (bpref)
2532		start = dtogd(fs, bpref) / NBBY;
2533	else
2534		start = cgp->cg_frotor / NBBY;
2535	blksfree = cg_blksfree(cgp);
2536	len = howmany(fs->fs_fpg, NBBY) - start;
2537	loc = scanc((u_int)len, (u_char *)&blksfree[start],
2538		fragtbl[fs->fs_frag],
2539		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2540	if (loc == 0) {
2541		len = start + 1;
2542		start = 0;
2543		loc = scanc((u_int)len, (u_char *)&blksfree[0],
2544			fragtbl[fs->fs_frag],
2545			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2546		if (loc == 0) {
2547			printf("start = %d, len = %d, fs = %s\n",
2548			    start, len, fs->fs_fsmnt);
2549			panic("ffs_mapsearch: map corrupted");
2550			/* NOTREACHED */
2551		}
2552	}
2553	bno = (start + len - loc) * NBBY;
2554	cgp->cg_frotor = bno;
2555	/*
2556	 * found the byte in the map
2557	 * sift through the bits to find the selected frag
2558	 */
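	/*
	 * Sketch of the mask walk below (the tables live in
	 * ffs_tables.c): around[allocsiz] selects a window of allocsiz
	 * frag bits plus a boundary bit on each side, and
	 * inside[allocsiz] is the value that window takes when exactly
	 * the interior bits are free, so sliding both masks one bit at a
	 * time finds a free run of precisely the requested size.
	 */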
2559	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2560		blk = blkmap(fs, blksfree, bno);
2561		blk <<= 1;
2562		field = around[allocsiz];
2563		subfield = inside[allocsiz];
2564		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2565			if ((blk & field) == subfield)
2566				return (bno + pos);
2567			field <<= 1;
2568			subfield <<= 1;
2569		}
2570	}
2571	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2572	panic("ffs_mapsearch: block not in map");
2573	return (-1);
2574}
2575
2576/*
2577 * Fserr prints the name of a filesystem with an error diagnostic.
2578 *
2579 * The form of the error message is:
2580 *	fs: error message
2581 */
2582void
2583ffs_fserr(fs, inum, cp)
2584	struct fs *fs;
2585	ino_t inum;
2586	char *cp;
2587{
2588	struct thread *td = curthread;	/* XXX */
2589	struct proc *p = td->td_proc;
2590
2591	log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
2592	    p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
2593	    fs->fs_fsmnt, cp);
2594}
2595
2596/*
2597 * This function provides the capability for the fsck program to
2598 * update an active filesystem. Fourteen operations are provided:
2599 *
2600 * adjrefcnt(inode, amt) - adjusts the reference count on the
2601 *	specified inode by the specified amount. Under normal
2602 *	operation the count should always go down. Decrementing
2603 *	the count to zero will cause the inode to be freed.
2604 * adjblkcnt(inode, amt) - adjust the number of blocks used by the
2605 *	inode by the specified amount.
2606 * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
2607 *	adjust the superblock summary.
2608 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2609 *	are marked as free. Inodes should never have to be marked
2610 *	as in use.
2611 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2612 *	are marked as free. Inodes should never have to be marked
2613 *	as in use.
2614 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2615 *	are marked as free. Blocks should never have to be marked
2616 *	as in use.
2617 * setflags(flags, set/clear) - the fs_flags field has the specified
2618 *	flags set (second parameter +1) or cleared (second parameter -1).
2619 * setcwd(dirinode) - set the current directory to dirinode in the
2620 *	filesystem associated with the snapshot.
2621 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2622 *	in the current directory is oldvalue then change it to newvalue.
2623 * unlink(nameptr, oldvalue) - Verify that the inode number associated
2624 *	with nameptr in the current directory is oldvalue then unlink it.
2625 *
2626 * The following functions may only be used on a quiescent filesystem
2627 * by the soft updates journal. They are not safe to be run on an active
2628 * filesystem.
2629 *
2630 * setinode(inode, dip) - the specified disk inode is replaced with the
2631 *	contents pointed to by dip.
2632 * setbufoutput(fd, flags) - output associated with the specified file
2633 *	descriptor (which must reference the character device supporting
2634 *	the filesystem) switches from using physio to running through the
2635 *	buffer cache when flags is set to 1. The descriptor reverts to
2636 *	physio for output when flags is set to zero.
2637 */
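/*
 * Usage sketch (illustrative, not part of this file): a userland
 * repair tool fills in a struct fsck_cmd and writes it to one of the
 * sysctls declared below.  For example, to drop an inode's link count
 * by one:
 *
 *	struct fsck_cmd cmd;
 *
 *	bzero(&cmd, sizeof(cmd));
 *	cmd.version = FFS_CMD_VERSION;
 *	cmd.handle = fd;	(descriptor open on the target filesystem)
 *	cmd.value = ino;	(inode whose count is adjusted)
 *	cmd.size = -1;		(signed adjustment to apply)
 *	if (sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL,
 *	    &cmd, sizeof(cmd)) == -1)
 *		err(1, "adjrefcnt");
 *
 * sysctl_ffs_fsck() below recovers the target filesystem from
 * cmd.handle via getvnode(), so the descriptor must reference a file
 * or directory within the filesystem being repaired.
 */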
2638
2639static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2640
2641SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2642	0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2643
2644static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2645	sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2646
2647static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2648	sysctl_ffs_fsck, "Adjust number of directories");
2649
2650static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2651	sysctl_ffs_fsck, "Adjust number of free blocks");
2652
2653static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2654	sysctl_ffs_fsck, "Adjust number of free inodes");
2655
2656static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2657	sysctl_ffs_fsck, "Adjust number of free frags");
2658
2659static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2660	sysctl_ffs_fsck, "Adjust number of free clusters");
2661
2662static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2663	sysctl_ffs_fsck, "Free Range of Directory Inodes");
2664
2665static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2666	sysctl_ffs_fsck, "Free Range of File Inodes");
2667
2668static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2669	sysctl_ffs_fsck, "Free Range of Blocks");
2670
2671static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2672	sysctl_ffs_fsck, "Change Filesystem Flags");
2673
2674static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2675	sysctl_ffs_fsck, "Set Current Working Directory");
2676
2677static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2678	sysctl_ffs_fsck, "Change Value of .. Entry");
2679
2680static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2681	sysctl_ffs_fsck, "Unlink a Duplicate Name");
2682
2683static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
2684	sysctl_ffs_fsck, "Update an On-Disk Inode");
2685
2686static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
2687	sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
2688
2689#define DEBUG 1
2690#ifdef DEBUG
2691static int fsckcmds = 0;
2692SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2693#endif /* DEBUG */
2694
2695static int buffered_write(struct file *, struct uio *, struct ucred *,
2696	int, struct thread *);
2697
2698static int
2699sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2700{
2701	struct thread *td = curthread;
2702	struct fsck_cmd cmd;
2703	struct ufsmount *ump;
2704	struct vnode *vp, *vpold, *dvp, *fdvp;
2705	struct inode *ip, *dp;
2706	struct mount *mp;
2707	struct fs *fs;
2708	ufs2_daddr_t blkno;
2709	long blkcnt, blksize;
2710	struct filedesc *fdp;
2711	struct file *fp, *vfp;
2712	cap_rights_t rights;
2713	int filetype, error;
2714	static struct fileops *origops, bufferedops;
2715
2716	if (req->newlen > sizeof cmd)
2717		return (EBADRPC);
2718	if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2719		return (error);
2720	if (cmd.version != FFS_CMD_VERSION)
2721		return (ERPCMISMATCH);
2722	if ((error = getvnode(td->td_proc->p_fd, cmd.handle,
2723	    cap_rights_init(&rights, CAP_FSCK), &fp)) != 0)
2724		return (error);
2725	vp = fp->f_data;
2726	if (vp->v_type != VREG && vp->v_type != VDIR) {
2727		fdrop(fp, td);
2728		return (EINVAL);
2729	}
2730	vn_start_write(vp, &mp, V_WAIT);
2731	if (mp == 0 || strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2732		vn_finished_write(mp);
2733		fdrop(fp, td);
2734		return (EINVAL);
2735	}
2736	ump = VFSTOUFS(mp);
2737	if ((mp->mnt_flag & MNT_RDONLY) &&
2738	    ump->um_fsckpid != td->td_proc->p_pid) {
2739		vn_finished_write(mp);
2740		fdrop(fp, td);
2741		return (EROFS);
2742	}
2743	fs = ump->um_fs;
2744	filetype = IFREG;
2745
2746	switch (oidp->oid_number) {
2747
2748	case FFS_SET_FLAGS:
2749#ifdef DEBUG
2750		if (fsckcmds)
2751			printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2752			    cmd.size > 0 ? "set" : "clear");
2753#endif /* DEBUG */
2754		if (cmd.size > 0)
2755			fs->fs_flags |= (long)cmd.value;
2756		else
2757			fs->fs_flags &= ~(long)cmd.value;
2758		break;
2759
2760	case FFS_ADJ_REFCNT:
2761#ifdef DEBUG
2762		if (fsckcmds) {
2763			printf("%s: adjust inode %jd link count by %jd\n",
2764			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2765			    (intmax_t)cmd.size);
2766		}
2767#endif /* DEBUG */
2768		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2769			break;
2770		ip = VTOI(vp);
2771		ip->i_nlink += cmd.size;
2772		DIP_SET(ip, i_nlink, ip->i_nlink);
2773		ip->i_effnlink += cmd.size;
2774		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2775		error = ffs_update(vp, 1);
2776		if (DOINGSOFTDEP(vp))
2777			softdep_change_linkcnt(ip);
2778		vput(vp);
2779		break;
2780
2781	case FFS_ADJ_BLKCNT:
2782#ifdef DEBUG
2783		if (fsckcmds) {
2784			printf("%s: adjust inode %jd block count by %jd\n",
2785			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2786			    (intmax_t)cmd.size);
2787		}
2788#endif /* DEBUG */
2789		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2790			break;
2791		ip = VTOI(vp);
2792		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2793		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2794		error = ffs_update(vp, 1);
2795		vput(vp);
2796		break;
2797
2798	case FFS_DIR_FREE:
2799		filetype = IFDIR;
2800		/* fall through */
2801
2802	case FFS_FILE_FREE:
2803#ifdef DEBUG
2804		if (fsckcmds) {
2805			if (cmd.size == 1)
2806				printf("%s: free %s inode %ju\n",
2807				    mp->mnt_stat.f_mntonname,
2808				    filetype == IFDIR ? "directory" : "file",
2809				    (uintmax_t)cmd.value);
2810			else
2811				printf("%s: free %s inodes %ju-%ju\n",
2812				    mp->mnt_stat.f_mntonname,
2813				    filetype == IFDIR ? "directory" : "file",
2814				    (uintmax_t)cmd.value,
2815				    (uintmax_t)(cmd.value + cmd.size - 1));
2816		}
2817#endif /* DEBUG */
2818		while (cmd.size > 0) {
2819			if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2820			    cmd.value, filetype, NULL)))
2821				break;
2822			cmd.size -= 1;
2823			cmd.value += 1;
2824		}
2825		break;
2826
2827	case FFS_BLK_FREE:
2828#ifdef DEBUG
2829		if (fsckcmds) {
2830			if (cmd.size == 1)
2831				printf("%s: free block %jd\n",
2832				    mp->mnt_stat.f_mntonname,
2833				    (intmax_t)cmd.value);
2834			else
2835				printf("%s: free blocks %jd-%jd\n",
2836				    mp->mnt_stat.f_mntonname,
2837				    (intmax_t)cmd.value,
2838				    (intmax_t)cmd.value + cmd.size - 1);
2839		}
2840#endif /* DEBUG */
2841		blkno = cmd.value;
2842		blkcnt = cmd.size;
2843		blksize = fs->fs_frag - (blkno % fs->fs_frag);
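		/*
		 * Illustrative numbers: with fs_frag == 8, blkno == 10 and
		 * blkcnt == 20, the loop below frees runs of 6, 8 and 6
		 * frags, realigning blkno to a block boundary after the
		 * first pass.
		 */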
2844		while (blkcnt > 0) {
2845			if (blksize > blkcnt)
2846				blksize = blkcnt;
2847			ffs_blkfree(ump, fs, ump->um_devvp, blkno,
2848			    blksize * fs->fs_fsize, ROOTINO, VDIR, NULL);
2849			blkno += blksize;
2850			blkcnt -= blksize;
2851			blksize = fs->fs_frag;
2852		}
2853		break;
2854
2855	/*
2856	 * Adjust superblock summaries.  fsck(8) is expected to
2857	 * submit deltas when necessary.
2858	 */
2859	case FFS_ADJ_NDIR:
2860#ifdef DEBUG
2861		if (fsckcmds) {
2862			printf("%s: adjust number of directories by %jd\n",
2863			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2864		}
2865#endif /* DEBUG */
2866		fs->fs_cstotal.cs_ndir += cmd.value;
2867		break;
2868
2869	case FFS_ADJ_NBFREE:
2870#ifdef DEBUG
2871		if (fsckcmds) {
2872			printf("%s: adjust number of free blocks by %+jd\n",
2873			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2874		}
2875#endif /* DEBUG */
2876		fs->fs_cstotal.cs_nbfree += cmd.value;
2877		break;
2878
2879	case FFS_ADJ_NIFREE:
2880#ifdef DEBUG
2881		if (fsckcmds) {
2882			printf("%s: adjust number of free inodes by %+jd\n",
2883			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2884		}
2885#endif /* DEBUG */
2886		fs->fs_cstotal.cs_nifree += cmd.value;
2887		break;
2888
2889	case FFS_ADJ_NFFREE:
2890#ifdef DEBUG
2891		if (fsckcmds) {
2892			printf("%s: adjust number of free frags by %+jd\n",
2893			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2894		}
2895#endif /* DEBUG */
2896		fs->fs_cstotal.cs_nffree += cmd.value;
2897		break;
2898
2899	case FFS_ADJ_NUMCLUSTERS:
2900#ifdef DEBUG
2901		if (fsckcmds) {
2902			printf("%s: adjust number of free clusters by %+jd\n",
2903			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2904		}
2905#endif /* DEBUG */
2906		fs->fs_cstotal.cs_numclusters += cmd.value;
2907		break;
2908
2909	case FFS_SET_CWD:
2910#ifdef DEBUG
2911		if (fsckcmds) {
2912			printf("%s: set current directory to inode %jd\n",
2913			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2914		}
2915#endif /* DEBUG */
2916		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
2917			break;
2918		AUDIT_ARG_VNODE1(vp);
2919		if ((error = change_dir(vp, td)) != 0) {
2920			vput(vp);
2921			break;
2922		}
2923		VOP_UNLOCK(vp, 0);
2924		fdp = td->td_proc->p_fd;
2925		FILEDESC_XLOCK(fdp);
2926		vpold = fdp->fd_cdir;
2927		fdp->fd_cdir = vp;
2928		FILEDESC_XUNLOCK(fdp);
2929		vrele(vpold);
2930		break;
2931
2932	case FFS_SET_DOTDOT:
2933#ifdef DEBUG
2934		if (fsckcmds) {
2935			printf("%s: change .. in cwd from %jd to %jd\n",
2936			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2937			    (intmax_t)cmd.size);
2938		}
2939#endif /* DEBUG */
2940		/*
2941		 * First we have to get and lock the parent directory
2942		 * to which ".." points.
2943		 */
2944		error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
2945		if (error)
2946			break;
2947		/*
2948		 * Now we get and lock the child directory containing "..".
2949		 */
2950		FILEDESC_SLOCK(td->td_proc->p_fd);
2951		dvp = td->td_proc->p_fd->fd_cdir;
2952		FILEDESC_SUNLOCK(td->td_proc->p_fd);
2953		if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
2954			vput(fdvp);
2955			break;
2956		}
2957		dp = VTOI(dvp);
2958		dp->i_offset = 12;	/* XXX mastertemplate.dot_reclen */
2959		error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
2960		    DT_DIR, 0);
2961		cache_purge(fdvp);
2962		cache_purge(dvp);
2963		vput(dvp);
2964		vput(fdvp);
2965		break;
2966
2967	case FFS_UNLINK:
2968#ifdef DEBUG
2969		if (fsckcmds) {
2970			char buf[32];
2971
2972			if (copyinstr((char *)(intptr_t)cmd.value, buf, 32, NULL))
2973				strncpy(buf, "Name_too_long", 32);
2974			printf("%s: unlink %s (inode %jd)\n",
2975			    mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
2976		}
2977#endif /* DEBUG */
2978		/*
2979		 * kern_unlinkat will do its own start/finish writes and
2980		 * they do not nest, so drop ours here. Setting mp == NULL
2981		 * indicates that vn_finished_write is not needed down below.
2982		 */
2983		vn_finished_write(mp);
2984		mp = NULL;
2985		error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
2986		    UIO_USERSPACE, (ino_t)cmd.size);
2987		break;
2988
2989	case FFS_SET_INODE:
2990		if (ump->um_fsckpid != td->td_proc->p_pid) {
2991			error = EPERM;
2992			break;
2993		}
2994#ifdef DEBUG
2995		if (fsckcmds) {
2996			printf("%s: update inode %jd\n",
2997			    mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2998		}
2999#endif /* DEBUG */
3000		if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3001			break;
3002		AUDIT_ARG_VNODE1(vp);
3003		ip = VTOI(vp);
3004		if (ip->i_ump->um_fstype == UFS1)
3005			error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
3006			    sizeof(struct ufs1_dinode));
3007		else
3008			error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
3009			    sizeof(struct ufs2_dinode));
3010		if (error) {
3011			vput(vp);
3012			break;
3013		}
3014		ip->i_flag |= IN_CHANGE | IN_MODIFIED;
3015		error = ffs_update(vp, 1);
3016		vput(vp);
3017		break;
3018
3019	case FFS_SET_BUFOUTPUT:
3020		if (ump->um_fsckpid != td->td_proc->p_pid) {
3021			error = EPERM;
3022			break;
3023		}
3024		if (VTOI(vp)->i_ump != ump) {
3025			error = EINVAL;
3026			break;
3027		}
3028#ifdef DEBUG
3029		if (fsckcmds) {
3030			printf("%s: %s buffered output for descriptor %jd\n",
3031			    mp->mnt_stat.f_mntonname,
3032			    cmd.size == 1 ? "enable" : "disable",
3033			    (intmax_t)cmd.value);
3034		}
3035#endif /* DEBUG */
3036		if ((error = getvnode(td->td_proc->p_fd, cmd.value,
3037		    cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0)
3038			break;
3039		if (vfp->f_vnode->v_type != VCHR) {
3040			fdrop(vfp, td);
3041			error = EINVAL;
3042			break;
3043		}
3044		if (origops == NULL) {
3045			origops = vfp->f_ops;
3046			bcopy((void *)origops, (void *)&bufferedops,
3047			    sizeof(bufferedops));
3048			bufferedops.fo_write = buffered_write;
3049		}
3050		if (cmd.size == 1)
3051			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3052			    (uintptr_t)&bufferedops);
3053		else
3054			atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3055			    (uintptr_t)origops);
3056		fdrop(vfp, td);
3057		break;
3058
3059	default:
3060#ifdef DEBUG
3061		if (fsckcmds) {
3062			printf("Invalid request %d from fsck\n",
3063			    oidp->oid_number);
3064		}
3065#endif /* DEBUG */
3066		error = EINVAL;
3067		break;
3068
3069	}
3070	fdrop(fp, td);
3071	vn_finished_write(mp);
3072	return (error);
3073}
3074
3075/*
3076 * Function to switch a descriptor to use the buffer cache to stage
3077 * its I/O. This is needed so that writes to the filesystem device
3078 * will give snapshots a chance to copy modified blocks for which it
3079 * needs to retain copies.
3080 */
3081static int
3082buffered_write(fp, uio, active_cred, flags, td)
3083	struct file *fp;
3084	struct uio *uio;
3085	struct ucred *active_cred;
3086	int flags;
3087	struct thread *td;
3088{
3089	struct vnode *devvp, *vp;
3090	struct inode *ip;
3091	struct buf *bp;
3092	struct fs *fs;
3093	struct filedesc *fdp;
3094	int error;
3095	daddr_t lbn;
3096
3097	/*
3098	 * The devvp is associated with the /dev filesystem. To discover
3099	 * the filesystem with which the device is associated, we depend
3100	 * on the application setting the current directory to a location
3101	 * within the filesystem being written. Yes, this is an ugly hack.
3102	 */
3103	devvp = fp->f_vnode;
3104	if (!vn_isdisk(devvp, NULL))
3105		return (EINVAL);
3106	fdp = td->td_proc->p_fd;
3107	FILEDESC_SLOCK(fdp);
3108	vp = fdp->fd_cdir;
3109	vref(vp);
3110	FILEDESC_SUNLOCK(fdp);
3111	vn_lock(vp, LK_SHARED | LK_RETRY);
3112	/*
3113	 * Check that the current directory vnode indeed belongs to
3114	 * UFS before trying to dereference UFS-specific v_data fields.
3115	 */
3116	if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
3117		vput(vp);
3118		return (EINVAL);
3119	}
3120	ip = VTOI(vp);
3121	if (ip->i_devvp != devvp) {
3122		vput(vp);
3123		return (EINVAL);
3124	}
3125	fs = ip->i_fs;
3126	vput(vp);
3127	foffset_lock_uio(fp, uio, flags);
3128	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
3129#ifdef DEBUG
3130	if (fsckcmds) {
3131		printf("%s: buffered write for block %jd\n",
3132		    fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
3133	}
3134#endif /* DEBUG */
3135	/*
3136	 * All I/O must be contained within a filesystem block, start on
3137	 * a fragment boundary, and be a multiple of fragments in length.
3138	 */
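	/*
	 * For example (illustrative numbers, fs_bsize == 32768 and
	 * fs_fsize == 4096): a 12288-byte write at offset 40960 is
	 * accepted, since it starts 8192 bytes into its block, ends at
	 * offset 20480 within that block, and both offset and length
	 * are frag multiples.
	 */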
3139	if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
3140	    fragoff(fs, uio->uio_offset) != 0 ||
3141	    fragoff(fs, uio->uio_resid) != 0) {
3142		error = EINVAL;
3143		goto out;
3144	}
3145	lbn = numfrags(fs, uio->uio_offset);
3146	bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
3147	bp->b_flags |= B_RELBUF;
3148	if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
3149		brelse(bp);
3150		goto out;
3151	}
3152	error = bwrite(bp);
3153out:
3154	VOP_UNLOCK(devvp, 0);
3155	foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);
3156	return (error);
3157}
3158