ffs_alloc.c (revision 186261) → ffs_alloc.c (revision 186334)
1/* $NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $ */
2/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */
3
4/*
5 * Copyright (c) 2002 Networks Associates Technology, Inc.
6 * All rights reserved.
7 *
8 * This software was developed for the FreeBSD Project by Marshall
9 * Kirk McKusick and Network Associates Laboratories, the Security
10 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
11 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
12 * research program
13 *
14 * Copyright (c) 1982, 1986, 1989, 1993
15 * The Regents of the University of California. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)ffs_alloc.c 8.19 (Berkeley) 7/13/95
42 */
43
44#include <sys/cdefs.h>
Deleted (revision 186261):
 45#if defined(__RCSID) && !defined(__lint)
 46__RCSID("$NetBSD: ffs_alloc.c,v 1.14 2004/06/20 22:20:18 jmc Exp $");
 47#endif /* !__lint */
Added (revision 186334):
 45__FBSDID("$FreeBSD: projects/makefs/ffs/ffs_alloc.c 186334 2008-12-19 18:45:43Z sam $");
48
49#include <sys/param.h>
50#include <sys/time.h>
51
52#include <errno.h>
53
54#include "makefs.h"
55
56#include <ufs/ufs/dinode.h>
57#include <ufs/ffs/fs.h>
58
59#include "ffs/ufs_bswap.h"
60#include "ffs/buf.h"
61#include "ffs/ufs_inode.h"
62#include "ffs/ffs_extern.h"
63
64static int scanc(u_int, const u_char *, const u_char *, int);
65
66static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
67static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
68static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
69 daddr_t (*)(struct inode *, int, daddr_t, int));
70static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);
71
72/*
73 * Allocate a block in the file system.
74 *
75 * The size of the requested block is given, which must be some
76 * multiple of fs_fsize and <= fs_bsize.
77 * A preference may be optionally specified. If a preference is given
78 * the following hierarchy is used to allocate a block:
79 * 1) allocate the requested block.
80 * 2) allocate a rotationally optimal block in the same cylinder.
81 * 3) allocate a block in the same cylinder group.
 82 * 4) quadratically rehash into other cylinder groups, until an
83 * available block is located.
84 * If no block preference is given the following hierarchy is used
85 * to allocate a block:
86 * 1) allocate a block in the cylinder group that contains the
87 * inode for the file.
 88 * 2) quadratically rehash into other cylinder groups, until an
89 * available block is located.
90 */
91int
92ffs_alloc(struct inode *ip, daddr_t lbn, daddr_t bpref, int size,
93 daddr_t *bnp)
94{
95 struct fs *fs = ip->i_fs;
96 daddr_t bno;
97 int cg;
98
99 *bnp = 0;
100 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
101 errx(1, "ffs_alloc: bad size: bsize %d size %d",
102 fs->fs_bsize, size);
103 }
104 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
105 goto nospace;
106 if (bpref >= fs->fs_size)
107 bpref = 0;
108 if (bpref == 0)
109 cg = ino_to_cg(fs, ip->i_number);
110 else
111 cg = dtog(fs, bpref);
112 bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
113 if (bno > 0) {
114 if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
115 ip->i_ffs1_blocks += size / DEV_BSIZE;
116 else
117 ip->i_ffs2_blocks += size / DEV_BSIZE;
118 *bnp = bno;
119 return (0);
120 }
121nospace:
122 return (ENOSPC);
123}
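
/*
 * Editorial sketch (not part of the original source): one way a caller
 * could request a full block for logical block `lbn', seeding the
 * preference with ffs_blkpref_ufs1() defined below.  The helper name
 * example_alloc_block() is hypothetical, and the block is disabled so
 * it cannot affect the build.
 */
#if 0
static int
example_alloc_block(struct inode *ip, daddr_t lbn, int indx, int32_t *bap,
    daddr_t *bnop)
{
	struct fs *fs = ip->i_fs;
	daddr_t pref;

	pref = ffs_blkpref_ufs1(ip, lbn, indx, bap);	/* placement hint */
	return (ffs_alloc(ip, lbn, pref, fs->fs_bsize, bnop));
}
#endif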
124
125/*
126 * Select the desired position for the next block in a file. The file is
127 * logically divided into sections. The first section is composed of the
128 * direct blocks. Each additional section contains fs_maxbpg blocks.
129 *
130 * If no blocks have been allocated in the first section, the policy is to
131 * request a block in the same cylinder group as the inode that describes
132 * the file. If no blocks have been allocated in any other section, the
133 * policy is to place the section in a cylinder group with a greater than
134 * average number of free blocks. An appropriate cylinder group is found
135 * by using a rotor that sweeps the cylinder groups. When a new group of
136 * blocks is needed, the sweep begins in the cylinder group following the
137 * cylinder group from which the previous allocation was made. The sweep
138 * continues until a cylinder group with greater than the average number
139 * of free blocks is found. If the allocation is for the first block in an
140 * indirect block, the information on the previous allocation is unavailable;
141 * here a best guess is made based upon the logical block number being
142 * allocated.
143 *
144 * If a section is already partially allocated, the policy is to
145 * contiguously allocate fs_maxcontig blocks. The end of one of these
146 * contiguous blocks and the beginning of the next is physically separated
147 * so that the disk head will be in transit between them for at least
148 * fs_rotdelay milliseconds. This is to allow time for the processor to
149 * schedule another I/O transfer.
150 */
151/* XXX ondisk32 */
152daddr_t
153ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
154{
155 struct fs *fs;
156 int cg;
157 int avgbfree, startcg;
158
159 fs = ip->i_fs;
160 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
161 if (lbn < NDADDR + NINDIR(fs)) {
162 cg = ino_to_cg(fs, ip->i_number);
163 return (fs->fs_fpg * cg + fs->fs_frag);
164 }
165 /*
166 * Find a cylinder with greater than average number of
167 * unused data blocks.
168 */
169 if (indx == 0 || bap[indx - 1] == 0)
170 startcg =
171 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
172 else
173 startcg = dtog(fs,
174 ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
175 startcg %= fs->fs_ncg;
176 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
177 for (cg = startcg; cg < fs->fs_ncg; cg++)
178 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
179 return (fs->fs_fpg * cg + fs->fs_frag);
180 for (cg = 0; cg <= startcg; cg++)
181 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
182 return (fs->fs_fpg * cg + fs->fs_frag);
183 return (0);
184 }
185 /*
186 * We just always try to lay things out contiguously.
187 */
188 return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
189}
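
/*
 * Editorial note (worked example, not in the original source): assuming
 * fs_maxbpg == 2048, a request for logical block 100000 that falls past
 * the direct/first-indirect range and has no previous block recorded
 * (bap[indx - 1] == 0) begins its sweep at cylinder group
 * ino_to_cg(fs, ip->i_number) + 100000 / 2048, i.e. 48 groups past the
 * inode's own group, wrapped modulo fs_ncg.
 */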
190
191daddr_t
192ffs_blkpref_ufs2(ip, lbn, indx, bap)
193 struct inode *ip;
194 daddr_t lbn;
195 int indx;
196 int64_t *bap;
197{
198 struct fs *fs;
199 int cg;
200 int avgbfree, startcg;
201
202 fs = ip->i_fs;
203 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
204 if (lbn < NDADDR + NINDIR(fs)) {
205 cg = ino_to_cg(fs, ip->i_number);
206 return (fs->fs_fpg * cg + fs->fs_frag);
207 }
208 /*
209 * Find a cylinder with greater than average number of
210 * unused data blocks.
211 */
212 if (indx == 0 || bap[indx - 1] == 0)
213 startcg =
214 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
215 else
216 startcg = dtog(fs,
217 ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
218 startcg %= fs->fs_ncg;
219 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
220 for (cg = startcg; cg < fs->fs_ncg; cg++)
221 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
222 return (fs->fs_fpg * cg + fs->fs_frag);
223 }
224 for (cg = 0; cg < startcg; cg++)
225 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
226 return (fs->fs_fpg * cg + fs->fs_frag);
227 }
228 return (0);
229 }
230 /*
231 * We just always try to lay things out contiguously.
232 */
233 return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
234}
235
236/*
237 * Implement the cylinder overflow algorithm.
238 *
239 * The policy implemented by this algorithm is:
240 * 1) allocate the block in its requested cylinder group.
 241 * 2) quadratically rehash on the cylinder group number.
242 * 3) brute force search for a free block.
243 *
244 * `size': size for data blocks, mode for inodes
245 */
246/*VARARGS5*/
247static daddr_t
248ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
249 daddr_t (*allocator)(struct inode *, int, daddr_t, int))
250{
251 struct fs *fs;
252 daddr_t result;
253 int i, icg = cg;
254
255 fs = ip->i_fs;
256 /*
257 * 1: preferred cylinder group
258 */
259 result = (*allocator)(ip, cg, pref, size);
260 if (result)
261 return (result);
262 /*
263 * 2: quadratic rehash
264 */
265 for (i = 1; i < fs->fs_ncg; i *= 2) {
266 cg += i;
267 if (cg >= fs->fs_ncg)
268 cg -= fs->fs_ncg;
269 result = (*allocator)(ip, cg, 0, size);
270 if (result)
271 return (result);
272 }
273 /*
274 * 3: brute force search
275 * Note that we start at i == 2, since 0 was checked initially,
276 * and 1 is always checked in the quadratic rehash.
277 */
278 cg = (icg + 2) % fs->fs_ncg;
279 for (i = 2; i < fs->fs_ncg; i++) {
280 result = (*allocator)(ip, cg, 0, size);
281 if (result)
282 return (result);
283 cg++;
284 if (cg == fs->fs_ncg)
285 cg = 0;
286 }
287 return (0);
288}
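
/*
 * Editorial note (worked example, not in the original source): with
 * fs_ncg == 8 and a preferred group of 0, the probes above visit group
 * 0 first, the quadratic rehash then tries 1, 3 and 7 (cg += 1, 2, 4),
 * and the brute force pass finally walks 2, 3, 4, 5, 6, 7 in order.
 */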
289
290/*
291 * Determine whether a block can be allocated.
292 *
293 * Check to see if a block of the appropriate size is available,
294 * and if it is, allocate it.
295 */
296static daddr_t
297ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
298{
299 struct cg *cgp;
300 struct buf *bp;
301 daddr_t bno, blkno;
302 int error, frags, allocsiz, i;
303 struct fs *fs = ip->i_fs;
304 const int needswap = UFS_FSNEEDSWAP(fs);
305
306 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
307 return (0);
308 error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
309 (int)fs->fs_cgsize, &bp);
310 if (error) {
311 brelse(bp);
312 return (0);
313 }
314 cgp = (struct cg *)bp->b_data;
315 if (!cg_chkmagic_swap(cgp, needswap) ||
316 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
317 brelse(bp);
318 return (0);
319 }
320 if (size == fs->fs_bsize) {
321 bno = ffs_alloccgblk(ip, bp, bpref);
322 bdwrite(bp);
323 return (bno);
324 }
325 /*
326 * check to see if any fragments are already available
327 * allocsiz is the size which will be allocated, hacking
328 * it down to a smaller size if necessary
329 */
330 frags = numfrags(fs, size);
331 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
332 if (cgp->cg_frsum[allocsiz] != 0)
333 break;
334 if (allocsiz == fs->fs_frag) {
335 /*
336 * no fragments were available, so a block will be
337 * allocated, and hacked up
338 */
339 if (cgp->cg_cs.cs_nbfree == 0) {
340 brelse(bp);
341 return (0);
342 }
343 bno = ffs_alloccgblk(ip, bp, bpref);
344 bpref = dtogd(fs, bno);
345 for (i = frags; i < fs->fs_frag; i++)
346 setbit(cg_blksfree_swap(cgp, needswap), bpref + i);
347 i = fs->fs_frag - frags;
348 ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
349 fs->fs_cstotal.cs_nffree += i;
350 fs->fs_cs(fs, cg).cs_nffree += i;
351 fs->fs_fmod = 1;
352 ufs_add32(cgp->cg_frsum[i], 1, needswap);
353 bdwrite(bp);
354 return (bno);
355 }
356 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
357 for (i = 0; i < frags; i++)
358 clrbit(cg_blksfree_swap(cgp, needswap), bno + i);
359 ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
360 fs->fs_cstotal.cs_nffree -= frags;
361 fs->fs_cs(fs, cg).cs_nffree -= frags;
362 fs->fs_fmod = 1;
363 ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
364 if (frags != allocsiz)
365 ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
366 blkno = cg * fs->fs_fpg + bno;
367 bdwrite(bp);
368 return blkno;
369}
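
/*
 * Editorial note (worked example, not in the original source): if three
 * fragments are taken from a free run of five (frags == 3,
 * allocsiz == 5), the code above decrements cg_frsum[5] and, because a
 * two-fragment remainder is left behind, increments cg_frsum[2];
 * cs_nffree drops by three.
 */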
370
371/*
372 * Allocate a block in a cylinder group.
373 *
374 * This algorithm implements the following policy:
375 * 1) allocate the requested block.
376 * 2) allocate a rotationally optimal block in the same cylinder.
377 * 3) allocate the next available block on the block rotor for the
378 * specified cylinder group.
379 * Note that this routine only allocates fs_bsize blocks; these
380 * blocks may be fragmented by the routine that allocates them.
381 */
382static daddr_t
383ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
384{
385 struct cg *cgp;
386 daddr_t blkno;
387 int32_t bno;
388 struct fs *fs = ip->i_fs;
389 const int needswap = UFS_FSNEEDSWAP(fs);
390 u_int8_t *blksfree;
391
392 cgp = (struct cg *)bp->b_data;
393 blksfree = cg_blksfree_swap(cgp, needswap);
394 if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
395 bpref = ufs_rw32(cgp->cg_rotor, needswap);
396 } else {
397 bpref = blknum(fs, bpref);
398 bno = dtogd(fs, bpref);
399 /*
400 * if the requested block is available, use it
401 */
402 if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
403 goto gotit;
404 }
405 /*
406 * Take the next available one in this cylinder group.
407 */
408 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
409 if (bno < 0)
410 return (0);
411 cgp->cg_rotor = ufs_rw32(bno, needswap);
412gotit:
413 blkno = fragstoblks(fs, bno);
414 ffs_clrblock(fs, blksfree, (long)blkno);
415 ffs_clusteracct(fs, cgp, blkno, -1);
416 ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
417 fs->fs_cstotal.cs_nbfree--;
418 fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
419 fs->fs_fmod = 1;
420 blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
421 return (blkno);
422}
423
424/*
425 * Free a block or fragment.
426 *
427 * The specified block or fragment is placed back in the
428 * free map. If a fragment is deallocated, a possible
429 * block reassembly is checked.
430 */
431void
432ffs_blkfree(struct inode *ip, daddr_t bno, long size)
433{
434 struct cg *cgp;
435 struct buf *bp;
436 int32_t fragno, cgbno;
437 int i, error, cg, blk, frags, bbase;
438 struct fs *fs = ip->i_fs;
439 const int needswap = UFS_FSNEEDSWAP(fs);
440
441 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
442 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
443 errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
444 (long long)bno, fs->fs_bsize, size);
445 }
446 cg = dtog(fs, bno);
447 if (bno >= fs->fs_size) {
448 warnx("bad block %lld, ino %d", (long long)bno, ip->i_number);
449 return;
450 }
451 error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
452 (int)fs->fs_cgsize, &bp);
453 if (error) {
454 brelse(bp);
455 return;
456 }
457 cgp = (struct cg *)bp->b_data;
458 if (!cg_chkmagic_swap(cgp, needswap)) {
459 brelse(bp);
460 return;
461 }
462 cgbno = dtogd(fs, bno);
463 if (size == fs->fs_bsize) {
464 fragno = fragstoblks(fs, cgbno);
465 if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
466 errx(1, "blkfree: freeing free block %lld",
467 (long long)bno);
468 }
469 ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
470 ffs_clusteracct(fs, cgp, fragno, 1);
471 ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
472 fs->fs_cstotal.cs_nbfree++;
473 fs->fs_cs(fs, cg).cs_nbfree++;
474 } else {
475 bbase = cgbno - fragnum(fs, cgbno);
476 /*
477 * decrement the counts associated with the old frags
478 */
479 blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
480 ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
481 /*
482 * deallocate the fragment
483 */
484 frags = numfrags(fs, size);
485 for (i = 0; i < frags; i++) {
486 if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
487 errx(1, "blkfree: freeing free frag: block %lld",
488 (long long)(cgbno + i));
489 }
490 setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
491 }
492 ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
493 fs->fs_cstotal.cs_nffree += i;
494 fs->fs_cs(fs, cg).cs_nffree += i;
495 /*
496 * add back in counts associated with the new frags
497 */
498 blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
499 ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
500 /*
501 * if a complete block has been reassembled, account for it
502 */
503 fragno = fragstoblks(fs, bbase);
504 if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
505 ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
506 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
507 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
508 ffs_clusteracct(fs, cgp, fragno, 1);
509 ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
510 fs->fs_cstotal.cs_nbfree++;
511 fs->fs_cs(fs, cg).cs_nbfree++;
512 }
513 }
514 fs->fs_fmod = 1;
515 bdwrite(bp);
516}
517
518
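/*
 * Local stand-in for the kernel's scanc(): skip bytes of cp[0..size)
 * whose table[] entry has none of the bits in mask set, and return how
 * many bytes remain from the first byte that does (0 if none matches).
 */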
519static int
520scanc(u_int size, const u_char *cp, const u_char table[], int mask)
521{
522 const u_char *end = &cp[size];
523
524 while (cp < end && (table[*cp] & mask) == 0)
525 cp++;
526 return (end - cp);
527}
528
529/*
530 * Find a block of the specified size in the specified cylinder group.
531 *
532 * It is a panic if a request is made to find a block if none are
533 * available.
534 */
535static int32_t
536ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
537{
538 int32_t bno;
539 int start, len, loc, i;
540 int blk, field, subfield, pos;
541 int ostart, olen;
542 const int needswap = UFS_FSNEEDSWAP(fs);
543
544 /*
545 * find the fragment by searching through the free block
546 * map for an appropriate bit pattern
547 */
548 if (bpref)
549 start = dtogd(fs, bpref) / NBBY;
550 else
551 start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
552 len = howmany(fs->fs_fpg, NBBY) - start;
553 ostart = start;
554 olen = len;
555 loc = scanc((u_int)len,
556 (const u_char *)&cg_blksfree_swap(cgp, needswap)[start],
557 (const u_char *)fragtbl[fs->fs_frag],
558 (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
559 if (loc == 0) {
560 len = start + 1;
561 start = 0;
562 loc = scanc((u_int)len,
563 (const u_char *)&cg_blksfree_swap(cgp, needswap)[0],
564 (const u_char *)fragtbl[fs->fs_frag],
565 (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
566 if (loc == 0) {
567 errx(1,
568 "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
569 ostart, olen,
570 ufs_rw32(cgp->cg_freeoff, needswap),
571 (long)cg_blksfree_swap(cgp, needswap) - (long)cgp);
572 /* NOTREACHED */
573 }
574 }
575 bno = (start + len - loc) * NBBY;
576 cgp->cg_frotor = ufs_rw32(bno, needswap);
577 /*
578 * found the byte in the map
579 * sift through the bits to find the selected frag
580 */
581 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
582 blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bno);
583 blk <<= 1;
584 field = around[allocsiz];
585 subfield = inside[allocsiz];
586 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
587 if ((blk & field) == subfield)
588 return (bno + pos);
589 field <<= 1;
590 subfield <<= 1;
591 }
592 }
593 errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
594 return (-1);
595}
596
597/*
598 * Update the cluster map because of an allocation or free.
599 *
600 * Cnt == 1 means free; cnt == -1 means allocating.
601 */
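/*
 * Editorial note: sump[] below is the per-group cluster summary, where
 * entry i counts free clusters of exactly i blocks (capped at
 * fs_contigsumsize), and fs_maxcluster[] caches, for each cylinder
 * group, the largest cluster length that still has a non-zero count.
 */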
602void
603ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
604{
605 int32_t *sump;
606 int32_t *lp;
607 u_char *freemapp, *mapp;
608 int i, start, end, forw, back, map, bit;
609 const int needswap = UFS_FSNEEDSWAP(fs);
610
611 if (fs->fs_contigsumsize <= 0)
612 return;
613 freemapp = cg_clustersfree_swap(cgp, needswap);
614 sump = cg_clustersum_swap(cgp, needswap);
615 /*
616 * Allocate or clear the actual block.
617 */
618 if (cnt > 0)
619 setbit(freemapp, blkno);
620 else
621 clrbit(freemapp, blkno);
622 /*
623 * Find the size of the cluster going forward.
624 */
625 start = blkno + 1;
626 end = start + fs->fs_contigsumsize;
627 if (end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
628 end = ufs_rw32(cgp->cg_nclusterblks, needswap);
629 mapp = &freemapp[start / NBBY];
630 map = *mapp++;
631 bit = 1 << (start % NBBY);
632 for (i = start; i < end; i++) {
633 if ((map & bit) == 0)
634 break;
635 if ((i & (NBBY - 1)) != (NBBY - 1)) {
636 bit <<= 1;
637 } else {
638 map = *mapp++;
639 bit = 1;
640 }
641 }
642 forw = i - start;
643 /*
644 * Find the size of the cluster going backward.
645 */
646 start = blkno - 1;
647 end = start - fs->fs_contigsumsize;
648 if (end < 0)
649 end = -1;
650 mapp = &freemapp[start / NBBY];
651 map = *mapp--;
652 bit = 1 << (start % NBBY);
653 for (i = start; i > end; i--) {
654 if ((map & bit) == 0)
655 break;
656 if ((i & (NBBY - 1)) != 0) {
657 bit >>= 1;
658 } else {
659 map = *mapp--;
660 bit = 1 << (NBBY - 1);
661 }
662 }
663 back = start - i;
664 /*
665 * Account for old cluster and the possibly new forward and
666 * back clusters.
667 */
668 i = back + forw + 1;
669 if (i > fs->fs_contigsumsize)
670 i = fs->fs_contigsumsize;
671 ufs_add32(sump[i], cnt, needswap);
672 if (back > 0)
673 ufs_add32(sump[back], -cnt, needswap);
674 if (forw > 0)
675 ufs_add32(sump[forw], -cnt, needswap);
676
677 /*
678 * Update cluster summary information.
679 */
680 lp = &sump[fs->fs_contigsumsize];
681 for (i = fs->fs_contigsumsize; i > 0; i--)
682 if (ufs_rw32(*lp--, needswap) > 0)
683 break;
684 fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
685}