/*	$NetBSD: ffs_alloc.c,v 1.32 2023/03/13 22:17:24 christos Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <sys/cdefs.h>
#if defined(__RCSID) && !defined(__lint)
__RCSID("$NetBSD: ffs_alloc.c,v 1.32 2023/03/13 22:17:24 christos Exp $");
#endif	/* !__lint */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ffs/fs.h>

#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"


static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, uint32_t, daddr_t, int,
		     daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/* in ffs_tables.c */
extern const int inside[], around[];
extern const u_char * const fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0) {
		errx(EXIT_FAILURE, "%s: bad size: bsize %d size %d", __func__,
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		DIP_ADD(ip, blocks, size / DEV_BSIZE);
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	uint32_t cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	uint32_t cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, uint32_t cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	uint32_t i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, 0, &bp);
	if (error) {
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp, 0);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = ffs_numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp, 0);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
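	/*
	 * A run of exactly allocsiz free fragments exists somewhere in this
	 * cylinder group (its frsum entry is non-zero); find it and mark
	 * the frags we need as allocated.
	 */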
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree;

	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp, needswap);
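	/*
	 * If the preference is absent or falls outside this cylinder group,
	 * fall back to the group's rotor: the point at which the previous
	 * block search in this group left off.
	 */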
	if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = ffs_blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree, ffs_fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = ffs_fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0 ||
	    ffs_fragnum(fs, bno) + ffs_numfrags(fs, size) > fs->fs_frag) {
		errx(EXIT_FAILURE, "%s: bad size: bno %lld bsize %d "
		    "size %ld", __func__, (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %llu", (long long)bno,
		    (unsigned long long)ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, 0, &bp);
	if (error) {
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap)) {
		brelse(bp, 0);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = ffs_fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			errx(EXIT_FAILURE, "%s: freeing free block %lld",
			    __func__, (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - ffs_fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = ffs_numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
				errx(EXIT_FAILURE, "%s: freeing free frag: "
				    "block %lld", __func__,
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = ffs_fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

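/*
 * Userland stand-in for the kernel scanc() routine: scan `size' bytes of
 * `cp', classifying each byte through `table', and stop at the first byte
 * whose table entry intersects `mask'.  Returns the number of bytes left
 * (counting the matching byte), or 0 if no byte matched.
 */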
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
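	/*
	 * scanc() walks the free map a byte at a time, using
	 * fragtbl[fs_frag] and the mask below to flag bytes that contain a
	 * free-fragment run of the size being sought; it returns the number
	 * of bytes remaining from the first such byte to the end of the
	 * scanned range, or 0 if none matched.
	 */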
	loc = scanc((u_int)len,
		(const u_char *)&cg_blksfree(cgp, needswap)[start],
		(const u_char *)fragtbl[fs->fs_frag],
		(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
			(const u_char *)&cg_blksfree(cgp, needswap)[0],
			(const u_char *)fragtbl[fs->fs_frag],
			(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(EXIT_FAILURE, "%s: map corrupted: start %d "
			    "len %d offset %d %ld", __func__, ostart, olen,
			    ufs_rw32(cgp->cg_freeoff, needswap),
			    (long)cg_blksfree(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(EXIT_FAILURE, "%s: block not in map: bno %lld", __func__,
	    (long long)bno);
	return (-1);
}