/*	$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>
#include <stdint.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_bswap.h"
#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct m_buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, u_int, daddr_t, int,
		     daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
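
/*
 * Usage sketch (illustrative only; the calling convention is the one
 * visible in ffs_alloc() above, but the surrounding context is
 * hypothetical):
 *
 *	daddr_t bno;
 *
 *	if (ffs_alloc(ip, lbn, bpref, fs->fs_bsize, &bno) == 0)
 *		... bno now holds the allocated fs block number ...
 *	else
 *		... ENOSPC: the file system is out of space ...
 *
 * Note that on success ffs_alloc() has already charged the inode's
 * block count (in DEV_BSIZE units), so callers must not account for
 * the new block a second time.
 */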

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
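
/*
 * Preference example (illustrative; it applies equally to
 * ffs_blkpref_ufs1() above and ffs_blkpref_ufs2() below): in the
 * middle of a section, with the file's previous block at fs block
 * 7000, the preference returned is simply 7000 + fs_frag, the block
 * immediately following.  Only at a section boundary (indx a multiple
 * of fs_maxbpg) or when no previous block is recorded does the sweep
 * over the cylinder-group summaries run, returning
 * fs_fpg * cg + fs_frag, a preference near the start of the chosen
 * group cg.
 */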

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	u_int cg, startcg;
	int avgbfree;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, u_int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	u_int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
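
/*
 * Worked example (illustrative): with fs_ncg = 16 and preferred group
 * icg = 5, ffs_hashalloc() probes the groups in this order:
 *
 *	pass 1:			5
 *	pass 2 (rehash):	6, 8, 12, 4	(cg += i for i = 1, 2, 4, 8,
 *						 wrapping modulo fs_ncg)
 *	pass 3 (brute force):	7, 8, 9, ..., 15, 0, ..., 4
 *
 * Group icg is probed once up front and icg + 1 is always covered by
 * the first rehash step, which is why the brute-force pass starts at
 * (icg + 2) % fs_ncg.
 */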

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct m_buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NULL, &bp);
	if (error) {
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}
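
/*
 * Fragment bookkeeping example (illustrative): with fs_frag = 8, a
 * 3-fragment request satisfied from a free 5-fragment run updates the
 * per-group counts as follows: cg_frsum[5] is decremented (one fewer
 * 5-fragment run), cg_frsum[2] is incremented (the 2-fragment
 * remainder becomes a new run), and cs_nffree drops by 3.
 */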

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct m_buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree_swap;

	cgp = (struct cg *)bp->b_data;
	blksfree_swap = cg_blksfree_swap(cgp, needswap);
	if (bpref == 0 || (uint32_t)dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree_swap, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}
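
/*
 * Illustrative note: ffs_alloccgblk() honours `bpref' only when it
 * falls inside the cylinder group described by `bp'.  For example,
 * with fs_fpg = 4096 a preference of 10000 lies in cg 2 (dtog), so if
 * bp holds cg 1 the preference is discarded and the group's cg_rotor
 * is used as the starting point instead.
 */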

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct m_buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread((void *)ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NULL, &bp);
	if (error) {
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
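
/*
 * Coalescing example (illustrative): with fs_frag = 8, freeing the
 * last 2 fragments of a block whose other 6 fragments are already
 * free leaves all 8 map bits set.  The reassembly step above then
 * converts the bookkeeping: cs_nffree is reduced by fs_frag and
 * cs_nbfree is incremented, so the space is counted exactly once, as
 * a full block.
 */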

/*
 * Scan cp[0..size-1] for the first byte whose entry in `table' has
 * any of the bits in `mask' set; return the number of bytes left in
 * the region, counting the matching byte, or 0 if no byte matches.
 * This is a userland stand-in for the kernel's scanc().
 */
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}
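
/*
 * Example (illustrative): in ffs_mapsearch() below, if the forward
 * scan starts at byte `start' with `len' bytes to go and scanc()
 * returns loc, the matching byte has index (start + len - loc), which
 * is scaled by NBBY to yield a fragment number.  A return of 0 means
 * no byte matched, and the wrapped-around scan from the front of the
 * map is tried next.
 */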

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
		(const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
		(const u_char *)fragtbl[fs->fs_frag],
		(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
			(const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
			(const u_char *)fragtbl[fs->fs_frag],
			(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
				ostart, olen,
				ufs_rw32(cgp->cg_freeoff, needswap),
				(long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}
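
/*
 * Bit-sifting sketch (illustrative): after `blk <<= 1' appends a zero
 * guard bit, the window test `(blk & field) == subfield' looks for
 * `allocsiz' consecutive free (set) bits bounded by clear bits.  For
 * allocsiz = 2, around[2] = 0xf and inside[2] = 0x6, matching the
 * pattern ..0110.., i.e. a free fragment pair whose neighbours are
 * unavailable; shifting field and subfield left walks that window
 * across the byte.
 */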

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree_swap(cgp, needswap);
	sump = cg_clustersum_swap(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if ((unsigned)end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);

	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
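
/*
 * Cluster summary example (illustrative): freeing a block that joins
 * a 2-block free run in front and a 1-block free run behind creates
 * one 4-block cluster.  The accounting above adds 1 to sump[4]
 * (capped at fs_contigsumsize) and subtracts the runs it absorbed
 * from sump[2] and sump[1].
 */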