1/*-
2 * modified for Lites 1.1
3 *
4 * Aug 1995, Godmar Back (gback@cs.utah.edu)
5 * University of Utah, Department of Computer Science
6 */
7/*-
8 * Copyright (c) 1982, 1986, 1989, 1993
9 * The Regents of the University of California. All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94
36 * $FreeBSD: head/sys/fs/ext2fs/ext2_alloc.c 249218 2013-04-06 22:21:23Z jeff $
37 */
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/conf.h>
42#include <sys/vnode.h>
43#include <sys/stat.h>
44#include <sys/mount.h>
45#include <sys/sysctl.h>
46#include <sys/syslog.h>
47#include <sys/buf.h>
48
49#include <fs/ext2fs/inode.h>
50#include <fs/ext2fs/ext2_mount.h>
51#include <fs/ext2fs/ext2fs.h>
52#include <fs/ext2fs/fs.h>
53#include <fs/ext2fs/ext2_extern.h>
54
55static daddr_t ext2_alloccg(struct inode *, int, daddr_t, int);
56static daddr_t ext2_clusteralloc(struct inode *, int, daddr_t, int);
57static u_long ext2_dirpref(struct inode *);
58static void ext2_fserr(struct m_ext2fs *, uid_t, char *);
59static u_long ext2_hashalloc(struct inode *, int, long, int,
60 daddr_t (*)(struct inode *, int, daddr_t,
61 int));
62static daddr_t ext2_nodealloccg(struct inode *, int, daddr_t, int);
63static daddr_t ext2_mapsearch(struct m_ext2fs *, char *, daddr_t);
64
65/*
66 * Allocate a block in the file system.
67 *
68 * A preference may be optionally specified. If a preference is given
69 * the following hierarchy is used to allocate a block:
70 * 1) allocate the requested block.
71 * 2) allocate a rotationally optimal block in the same cylinder.
72 * 3) allocate a block in the same cylinder group.
73 * 4) quadratically rehash into other cylinder groups, until an
74 * available block is located.
75 * If no block preference is given the following hierarchy is used
76 * to allocate a block:
77 * 1) allocate a block in the cylinder group that contains the
78 * inode for the file.
79 * 2) quadratically rehash into other cylinder groups, until an
80 * available block is located.
81 */
82int
83ext2_alloc(struct inode *ip, int32_t lbn, int32_t bpref, int size,
84 struct ucred *cred, int32_t *bnp)
85{
86 struct m_ext2fs *fs;
87 struct ext2mount *ump;
88 int32_t bno;
89 int cg;
90 *bnp = 0;
91 fs = ip->i_e2fs;
92 ump = ip->i_ump;
93 mtx_assert(EXT2_MTX(ump), MA_OWNED);
94#ifdef DIAGNOSTIC
95 if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) {
96 vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n",
97 (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt);
98 panic("ext2_alloc: bad size");
99 }
100 if (cred == NOCRED)
101 panic("ext2_alloc: missing credential");
102#endif /* DIAGNOSTIC */
103 if (size == fs->e2fs_bsize && fs->e2fs->e2fs_fbcount == 0)
104 goto nospace;
105 if (cred->cr_uid != 0 &&
106 fs->e2fs->e2fs_fbcount < fs->e2fs->e2fs_rbcount)
107 goto nospace;
108 if (bpref >= fs->e2fs->e2fs_bcount)
109 bpref = 0;
110 if (bpref == 0)
111 cg = ino_to_cg(fs, ip->i_number);
112 else
113 cg = dtog(fs, bpref);
114 bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize,
115 ext2_alloccg);
116 if (bno > 0) {
117 /* set next_alloc fields as done in block_getblk */
118 ip->i_next_alloc_block = lbn;
119 ip->i_next_alloc_goal = bno;
120
121 ip->i_blocks += btodb(fs->e2fs_bsize);
122 ip->i_flag |= IN_CHANGE | IN_UPDATE;
123 *bnp = bno;
124 return (0);
125 }
126nospace:
127 EXT2_UNLOCK(ump);
128 ext2_fserr(fs, cred->cr_uid, "file system full");
129 uprintf("\n%s: write failed, file system is full\n", fs->e2fs_fsmnt);
130 return (ENOSPC);
131}
132
133/*
134 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
135 *
136 * The vnode and an array of buffer pointers for a range of sequential
137 * logical blocks to be made contiguous is given. The allocator attempts
138 * to find a range of sequential blocks starting as close as possible to
139 * an fs_rotdelay offset from the end of the allocation for the logical
140 * block immediately preceding the current range. If successful, the
141 * physical block numbers in the buffer pointers and in the inode are
142 * changed to reflect the new allocation. If unsuccessful, the allocation
143 * is left unchanged. The success of the reallocation is returned.
144 * Note that the error return is not reflected back to the user. Rather
145 * the previous block allocation will be used.
146 */
147
148static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem");
149
150static int doasyncfree = 0;
151SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
152 "Use asynchronous writes to update block pointers when freeing blocks");
153
154static int doreallocblks = 0;
155SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");
156
157int
158ext2_reallocblks(struct vop_reallocblks_args *ap)
159{
160 struct m_ext2fs *fs;
161 struct inode *ip;
162 struct vnode *vp;
163 struct buf *sbp, *ebp;
164 uint32_t *bap, *sbap, *ebap = 0;
165 struct ext2mount *ump;
166 struct cluster_save *buflist;
167 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
168 int32_t start_lbn, end_lbn, soff, newblk, blkno;
169 int i, len, start_lvl, end_lvl, pref, ssize;
170
171 if (doreallocblks == 0)
172 return (ENOSPC);
173
174 vp = ap->a_vp;
175 ip = VTOI(vp);
176 fs = ip->i_e2fs;
177 ump = ip->i_ump;
178
179 if (fs->e2fs_contigsumsize <= 0)
180 return (ENOSPC);
181
182 buflist = ap->a_buflist;
183 len = buflist->bs_nchildren;
184 start_lbn = buflist->bs_children[0]->b_lblkno;
185 end_lbn = start_lbn + len - 1;
186#ifdef DIAGNOSTIC
187 for (i = 1; i < len; i++)
188 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
189 panic("ext2_reallocblks: non-cluster");
190#endif
191 /*
192 * If the cluster crosses the boundary for the first indirect
193 * block, leave space for the indirect block. Indirect blocks
194 * are initially laid out in a position after the last direct
195 * block. Block reallocation would usually destroy locality by
196 * moving the indirect block out of the way to make room for
197 * data blocks if we didn't compensate here. We should also do
198 * this for other indirect block boundaries, but it is only
199 * important for the first one.
200 */
201 if (start_lbn < NDADDR && end_lbn >= NDADDR)
202 return (ENOSPC);
203 /*
204 * If the latest allocation is in a new cylinder group, assume that
205 * the filesystem has decided to move and do not force it back to
206 * the previous cylinder group.
207 */
208 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
209 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
210 return (ENOSPC);
211 if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
212 ext2_getlbns(vp, end_lbn, end_ap, &end_lvl))
213 return (ENOSPC);
214 /*
215 * Get the starting offset and block map for the first block.
216 */
217 if (start_lvl == 0) {
218 sbap = &ip->i_db[0];
219 soff = start_lbn;
220 } else {
221 idp = &start_ap[start_lvl - 1];
222 if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) {
223 brelse(sbp);
224 return (ENOSPC);
225 }
226 sbap = (int32_t *)sbp->b_data;
227 soff = idp->in_off;
228 }
229 /*
230 * If the block range spans two block maps, get the second map.
231 */
232 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
233 ssize = len;
234 } else {
235#ifdef DIAGNOSTIC
236 if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
237 panic("ext2_reallocblks: start == end");
238#endif
239 ssize = len - (idp->in_off + 1);
240 if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp))
241 goto fail;
242 ebap = (int32_t *)ebp->b_data;
243 }
244 /*
245 * Find the preferred location for the cluster.
246 */
247 EXT2_LOCK(ump);
248 pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0);
249 /*
250 * Search the block map looking for an allocation of the desired size.
251 */
252 if ((newblk = (int32_t)ext2_hashalloc(ip, dtog(fs, pref), pref,
253 len, ext2_clusteralloc)) == 0) {
254 EXT2_UNLOCK(ump);
255 goto fail;
256 }
257 /*
258 * We have found a new contiguous block.
259 *
260 * First we have to replace the old block pointers with the new
261 * block pointers in the inode and indirect blocks associated
262 * with the file.
263 */
264#ifdef DEBUG
265 printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
266 (intmax_t)start_lbn, (intmax_t)end_lbn);
267#endif /* DEBUG */
268 blkno = newblk;
269 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
270 if (i == ssize) {
271 bap = ebap;
272 soff = -i;
273 }
274#ifdef DIAGNOSTIC
275 if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
276 panic("ext2_reallocblks: alloc mismatch");
277#endif
278#ifdef DEBUG
279 printf(" %d,", *bap);
280#endif /* DEBUG */
281 *bap++ = blkno;
282 }
283 /*
284 * Next we must write out the modified inode and indirect blocks.
285 * For strict correctness, the writes should be synchronous since
286 * the old block values may have been written to disk. In practice
287 * they are almost never written, but if we are concerned about
288 * strict correctness, the `doasyncfree' flag should be set to zero.
289 *
290 * The test on `doasyncfree' should be changed to test a flag
291 * that shows whether the associated buffers and inodes have
292 * been written. The flag should be set when the cluster is
293 * started and cleared whenever the buffer or inode is flushed.
294 * We can then check below to see if it is set, and do the
295 * synchronous write only when it has been cleared.
296 */
297 if (sbap != &ip->i_db[0]) {
298 if (doasyncfree)
299 bdwrite(sbp);
300 else
301 bwrite(sbp);
302 } else {
303 ip->i_flag |= IN_CHANGE | IN_UPDATE;
304 if (!doasyncfree)
305 ext2_update(vp, 1);
306 }
307 if (ssize < len) {
308 if (doasyncfree)
309 bdwrite(ebp);
310 else
311 bwrite(ebp);
312 }
313 /*
314 * Last, free the old blocks and assign the new blocks to the buffers.
315 */
316#ifdef DEBUG
317 printf("\n\tnew:");
318#endif /* DEBUG */
319 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
320 ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
321 fs->e2fs_bsize);
322 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
323#ifdef DEBUG
324 printf(" %d,", blkno);
325#endif /* DEBUG */
326 }
327#ifdef DEBUG
328 printf("\n");
329#endif /* DEBUG */
330 return (0);
331
332fail:
333 if (ssize < len)
334 brelse(ebp);
335 if (sbap != &ip->i_db[0])
336 brelse(sbp);
337 return (ENOSPC);
338}
339
340/*
341 * Allocate an inode in the file system.
342 *
343 */
344int
345ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
346{
347 struct timespec ts;
348 struct inode *pip;
349 struct m_ext2fs *fs;
350 struct inode *ip;
351 struct ext2mount *ump;
352 ino_t ino, ipref;
353 int i, error, cg;
354
355 *vpp = NULL;
356 pip = VTOI(pvp);
357 fs = pip->i_e2fs;
358 ump = pip->i_ump;
359
360 EXT2_LOCK(ump);
361 if (fs->e2fs->e2fs_ficount == 0)
362 goto noinodes;
363 /*
364 * If it is a directory then obtain a cylinder group based on
365 * ext2_dirpref else obtain it using ino_to_cg. The preferred inode is
366 * always the next inode.
367 */
368 if ((mode & IFMT) == IFDIR) {
369 cg = ext2_dirpref(pip);
370 if (fs->e2fs_contigdirs[cg] < 255)
371 fs->e2fs_contigdirs[cg]++;
372 } else {
373 cg = ino_to_cg(fs, pip->i_number);
374 if (fs->e2fs_contigdirs[cg] > 0)
375 fs->e2fs_contigdirs[cg]--;
376 }
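	/*
	 * ext2 inode numbers are 1-based, so the first inode of group cg is
	 * cg * e2fs_ipg + 1 (e.g. with 8192 inodes per group, group 2 starts
	 * at inode 16385).  ext2_nodealloccg() converts this preference back
	 * to a 0-based bitmap offset.
	 */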
377 ipref = cg * fs->e2fs->e2fs_ipg + 1;
378 ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg);
379
380 if (ino == 0)
381 goto noinodes;
382 error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
383 if (error) {
384 ext2_vfree(pvp, ino, mode);
385 return (error);
386 }
387 ip = VTOI(*vpp);
388
389 /*
390 * The question is whether using VGET was such a good idea at all:
391 * Linux doesn't read the old inode in when it is allocating a
392 * new one. I will set at least i_size and i_blocks to zero.
393 */
394 ip->i_size = 0;
395 ip->i_blocks = 0;
396 ip->i_mode = 0;
397 ip->i_flags = 0;
398 /* now we want to make sure that the block pointers are zeroed out */
399 for (i = 0; i < NDADDR; i++)
400 ip->i_db[i] = 0;
401 for (i = 0; i < NIADDR; i++)
402 ip->i_ib[i] = 0;
403
404 /*
405 * Set up a new generation number for this inode.
406 * XXX check if this makes sense in ext2
407 */
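	/*
	 * The generation number is exported in NFS file handles; keeping it
	 * non-zero and changing it whenever the inode number is reused lets
	 * clients detect stale handles.
	 */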
408 if (ip->i_gen == 0 || ++ip->i_gen == 0)
409 ip->i_gen = random() / 2 + 1;
410
411 vfs_timestamp(&ts);
412 ip->i_birthtime = ts.tv_sec;
413 ip->i_birthnsec = ts.tv_nsec;
414
415/*
416printf("ext2_valloc: allocated inode %d\n", ino);
417*/
418 return (0);
419noinodes:
420 EXT2_UNLOCK(ump);
421 ext2_fserr(fs, cred->cr_uid, "out of inodes");
422 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt);
423 return (ENOSPC);
424}
425
426/*
427 * Find a cylinder to place a directory.
428 *
429 * The policy implemented by this algorithm is to allocate a
430 * directory inode in the same cylinder group as its parent
431 * directory, but also to reserve space for its files' inodes
432 * and data. Restrict the number of directories which may be
433 * allocated one after another in the same cylinder group
434 * without intervening allocation of files.
435 *
436 * If we allocate a first level directory then force allocation
437 * in another cylinder group.
438 *
439 */
440static u_long
441ext2_dirpref(struct inode *pip)
442{
443 struct m_ext2fs *fs;
444 int cg, prefcg, dirsize, cgsize;
445 int avgifree, avgbfree, avgndir, curdirsize;
446 int minifree, minbfree, maxndir;
447 int mincg, minndir;
448 int maxcontigdirs;
449
450 mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED);
451 fs = pip->i_e2fs;
452
453 avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount;
454 avgbfree = fs->e2fs->e2fs_fbcount / fs->e2fs_gcount;
455 avgndir = fs->e2fs_total_dir / fs->e2fs_gcount;
456
457 /*
458 * Force allocation in another cg if creating a first level dir.
459 */
460 ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref");
461 if (ITOV(pip)->v_vflag & VV_ROOT) {
462 prefcg = arc4random() % fs->e2fs_gcount;
463 mincg = prefcg;
464 minndir = fs->e2fs_ipg;
465 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
466 if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
467 fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
468 fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
469 mincg = cg;
470 minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
471 }
472 for (cg = 0; cg < prefcg; cg++)
473 if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
474 fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
475 fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
476 mincg = cg;
477 minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
478 }
479
480 return (mincg);
481 }
482
483 /*
484 * Compute various limits used for
485 * optimal allocation of a directory inode.
486 */
487 maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg);
488 minifree = avgifree - avgifree / 4;
489 if (minifree < 1)
490 minifree = 1;
491 minbfree = avgbfree - avgbfree / 4;
492 if (minbfree < 1)
493 minbfree = 1;
494 cgsize = fs->e2fs_fsize * fs->e2fs_fpg;
495 dirsize = AVGDIRSIZE;
496 curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0;
497 if (dirsize < curdirsize)
498 dirsize = curdirsize;
499 if (dirsize <= 0)
500 maxcontigdirs = 0; /* dirsize overflowed */
501 else
502 maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255);
503 maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR);
504 if (maxcontigdirs == 0)
505 maxcontigdirs = 1;
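	/*
	 * maxcontigdirs estimates how many new directories, each assumed to
	 * consume roughly `dirsize' bytes of data, the free blocks of a group
	 * can absorb.  It is clamped to 255, matching the range of the
	 * per-group e2fs_contigdirs[] counter maintained by ext2_valloc().
	 */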
506
507 /*
508 * Limit number of dirs in one cg and reserve space for
509 * regular files, but only if we have no deficit in
510 * inodes or space.
511 */
512 prefcg = ino_to_cg(fs, pip->i_number);
513 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
514 if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
515 fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
516 fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
517 if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
518 return (cg);
519 }
520 for (cg = 0; cg < prefcg; cg++)
521 if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
522 fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
523 fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
524 if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
525 return (cg);
526 }
527 /*
528 * This is a backstop when we have deficit in space.
529 */
530 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
531 if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
532 return (cg);
533 for (cg = 0; cg < prefcg; cg++)
534 if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
535 break;
536 return (cg);
537}
538
539/*
540 * Select the desired position for the next block in a file.
541 *
542 * we try to mimic what Remy does in inode_getblk/block_getblk
543 *
544 * we note: blocknr == 0 means that we're about to allocate either
545 * a direct block or a pointer block at the first level of indirection
546 * (In other words, stuff that will go in i_db[] or i_ib[])
547 *
548 * blocknr != 0 means that we're allocating a block that is none
549 * of the above. Then, blocknr tells us the number of the block
550 * that will hold the pointer
551 */
552int32_t
553ext2_blkpref(struct inode *ip, int32_t lbn, int indx, int32_t *bap,
554 int32_t blocknr)
555{
556 int tmp;
557 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
558
559 /* if the block being allocated is the one we predicted last time,
560 then use the goal we stored for it
561 */
562 if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0)
563 return ip->i_next_alloc_goal;
564
565 /* now check whether we were provided with an array that basically
566 tells us the previous blocks to which we want to stay close by
567 */
568 if (bap)
569 for (tmp = indx - 1; tmp >= 0; tmp--)
570 if (bap[tmp])
571 return bap[tmp];
572
573 /* else let's fall back to the blocknr, or, if there is none,
574 follow the rule that a block should be allocated near its inode
575 */
576 return blocknr ? blocknr :
577 (int32_t)(ip->i_block_group *
578 EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
579 ip->i_e2fs->e2fs->e2fs_first_dblock;
580}
581
582/*
583 * Implement the cylinder overflow algorithm.
584 *
585 * The policy implemented by this algorithm is:
586 * 1) allocate the block in its requested cylinder group.
587 * 2) quadratically rehash on the cylinder group number.
588 * 3) brute force search for a free block.
589 */
590static u_long
591ext2_hashalloc(struct inode *ip, int cg, long pref, int size,
592 daddr_t (*allocator)(struct inode *, int, daddr_t, int))
593{
594 struct m_ext2fs *fs;
595 ino_t result;
596 int i, icg = cg;
597
598 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
599 fs = ip->i_e2fs;
600 /*
601 * 1: preferred cylinder group
602 */
603 result = (*allocator)(ip, cg, pref, size);
604 if (result)
605 return (result);
606 /*
607 * 2: quadratic rehash
608 */
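	/*
	 * The step doubles on each pass, so the groups probed are
	 * icg + 1, icg + 3, icg + 7, icg + 15, ... (mod e2fs_gcount).
	 */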
609 for (i = 1; i < fs->e2fs_gcount; i *= 2) {
610 cg += i;
611 if (cg >= fs->e2fs_gcount)
612 cg -= fs->e2fs_gcount;
613 result = (*allocator)(ip, cg, 0, size);
614 if (result)
615 return (result);
616 }
617 /*
618 * 3: brute force search
619 * Note that we start at i == 2, since 0 was checked initially,
620 * and 1 is always checked in the quadratic rehash.
621 */
622 cg = (icg + 2) % fs->e2fs_gcount;
623 for (i = 2; i < fs->e2fs_gcount; i++) {
624 result = (*allocator)(ip, cg, 0, size);
625 if (result)
626 return (result);
627 cg++;
628 if (cg == fs->e2fs_gcount)
629 cg = 0;
630 }
631 return (0);
632}
633
634/*
635 * Determine whether a block can be allocated.
636 *
637 * Check to see if a block of the appropriate size is available,
638 * and if it is, allocate it.
639 */
640static daddr_t
641ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
642{
643 struct m_ext2fs *fs;
644 struct buf *bp;
645 struct ext2mount *ump;
646 daddr_t bno, runstart, runlen;
647 int bit, loc, end, error, start;
648 char *bbp;
649 /* XXX ondisk32 */
650 fs = ip->i_e2fs;
651 ump = ip->i_ump;
652 if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
653 return (0);
654 EXT2_UNLOCK(ump);
655 error = bread(ip->i_devvp, fsbtodb(fs,
656 fs->e2fs_gd[cg].ext2bgd_b_bitmap),
657 (int)fs->e2fs_bsize, NOCRED, &bp);
658 if (error) {
659 brelse(bp);
660 EXT2_LOCK(ump);
661 return (0);
662 }
663 if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
664 /*
665 * Another thread allocated the last block in this
666 * group while we were waiting for the buffer.
667 */
668 brelse(bp);
669 EXT2_LOCK(ump);
670 return (0);
671 }
672 bbp = (char *)bp->b_data;
673
674 if (dtog(fs, bpref) != cg)
675 bpref = 0;
676 if (bpref != 0) {
677 bpref = dtogd(fs, bpref);
678 /*
679 * if the requested block is available, use it
680 */
681 if (isclr(bbp, bpref)) {
682 bno = bpref;
683 goto gotit;
684 }
685 }
686 /*
687 * no blocks in the requested cylinder, so take the next
688 * available one in this cylinder group.
689 * first try to get 8 contiguous blocks, then fall back to a
690 * single block.
691 */
692 if (bpref)
693 start = dtogd(fs, bpref) / NBBY;
694 else
695 start = 0;
696 end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
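	/*
	 * Scan the bitmap a byte at a time for a run of at least 8 clear
	 * bits (free blocks).  A 0xff byte is fully allocated and ends any
	 * run; fls()/ffs() count the clear bits at the top and bottom of a
	 * partially used byte.
	 */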
697retry:
698 runlen = 0;
699 runstart = 0;
700 for (loc = start; loc < end; loc++) {
701 if (bbp[loc] == (char)0xff) {
702 runlen = 0;
703 continue;
704 }
705
706 /* Start of a run, find the number of high clear bits. */
707 if (runlen == 0) {
708 bit = fls(bbp[loc]);
709 runlen = NBBY - bit;
710 runstart = loc * NBBY + bit;
711 } else if (bbp[loc] == 0) {
712 /* Continue a run. */
713 runlen += NBBY;
714 } else {
715 /*
716 * Finish the current run. If it isn't long
717 * enough, start a new one.
718 */
719 bit = ffs(bbp[loc]) - 1;
720 runlen += bit;
721 if (runlen >= 8) {
722 bno = runstart;
723 goto gotit;
724 }
725
726 /* Run was too short, start a new one. */
727 bit = fls(bbp[loc]);
728 runlen = NBBY - bit;
729 runstart = loc * NBBY + bit;
730 }
731
732 /* If the current run is long enough, use it. */
733 if (runlen >= 8) {
734 bno = runstart;
735 goto gotit;
736 }
737 }
738 if (start != 0) {
739 end = start;
740 start = 0;
741 goto retry;
742 }
743
744 bno = ext2_mapsearch(fs, bbp, bpref);
745 if (bno < 0) {
746 brelse(bp);
747 EXT2_LOCK(ump);
748 return (0);
749 }
750gotit:
751#ifdef DIAGNOSTIC
752 if (isset(bbp, bno)) {
753 printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
754 cg, (intmax_t)bno, fs->e2fs_fsmnt);
755 panic("ext2fs_alloccg: dup alloc");
756 }
757#endif
758 setbit(bbp, bno);
759 EXT2_LOCK(ump);
760 ext2_clusteracct(fs, bbp, cg, bno, -1);
761 fs->e2fs->e2fs_fbcount--;
762 fs->e2fs_gd[cg].ext2bgd_nbfree--;
763 fs->e2fs_fmod = 1;
764 EXT2_UNLOCK(ump);
765 bdwrite(bp);
766 return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
767}
768
769/*
770 * Determine whether a cluster can be allocated.
771 */
772static daddr_t
773ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
774{
775 struct m_ext2fs *fs;
776 struct ext2mount *ump;
777 struct buf *bp;
778 char *bbp;
779 int bit, error, got, i, loc, run;
780 int32_t *lp;
781 daddr_t bno;
782
783 fs = ip->i_e2fs;
784 ump = ip->i_ump;
785
786 if (fs->e2fs_maxcluster[cg] < len)
787 return (0);
788
789 EXT2_UNLOCK(ump);
790 error = bread(ip->i_devvp,
791 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
792 (int)fs->e2fs_bsize, NOCRED, &bp);
793 if (error)
794 goto fail_lock;
795
796 bbp = (char *)bp->b_data;
797 EXT2_LOCK(ump);
798 /*
799 * Check to see if a cluster of the needed size (or bigger) is
800 * available in this cylinder group.
801 */
802 lp = &fs->e2fs_clustersum[cg].cs_sum[len];
803 for (i = len; i <= fs->e2fs_contigsumsize; i++)
804 if (*lp++ > 0)
805 break;
806 if (i > fs->e2fs_contigsumsize) {
807 /*
808 * Update the cluster summary information to reflect
809 * the true maximum-sized cluster so that future cluster
810 * allocation requests can avoid reading the bitmap only
811 * to find no cluster.
812 */
813 lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
814 for (i = len - 1; i > 0; i--)
815 if (*lp-- > 0)
816 break;
817 fs->e2fs_maxcluster[cg] = i;
818 goto fail;
819 }
820 EXT2_UNLOCK(ump);
821
822 /* Search the bitmap to find a big enough cluster like in FFS. */
823 if (dtog(fs, bpref) != cg)
824 bpref = 0;
825 if (bpref != 0)
826 bpref = dtogd(fs, bpref);
827 loc = bpref / NBBY;
828 bit = 1 << (bpref % NBBY);
829 for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
830 if ((bbp[loc] & bit) != 0)
831 run = 0;
832 else {
833 run++;
834 if (run == len)
835 break;
836 }
837 if ((got & (NBBY - 1)) != (NBBY - 1))
838 bit <<= 1;
839 else {
840 loc++;
841 bit = 1;
842 }
843 }
844
845 if (got >= fs->e2fs->e2fs_fpg)
846 goto fail_lock;
847
848 /* Allocate the cluster that we found. */
849 for (i = 1; i < len; i++)
850 if (!isclr(bbp, got - run + i))
851 panic("ext2_clusteralloc: map mismatch");
852
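	/*
	 * `got' is the bit index of the last block in the run, so the
	 * cluster starts at got - run + 1.
	 */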
853 bno = got - run + 1;
854 if (bno >= fs->e2fs->e2fs_fpg)
855 panic("ext2_clusteralloc: allocated out of group");
856
857 EXT2_LOCK(ump);
858 for (i = 0; i < len; i += fs->e2fs_fpb) {
859 setbit(bbp, bno + i);
860 ext2_clusteracct(fs, bbp, cg, bno + i, -1);
861 fs->e2fs->e2fs_fbcount--;
862 fs->e2fs_gd[cg].ext2bgd_nbfree--;
863 }
864 fs->e2fs_fmod = 1;
865 EXT2_UNLOCK(ump);
866
867 bdwrite(bp);
868 return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
869
870fail_lock:
871 EXT2_LOCK(ump);
872fail:
873 brelse(bp);
874 return (0);
875}
876
877/*
878 * Determine whether an inode can be allocated.
879 *
880 * Check to see if an inode is available, and if it is,
881 * allocate it in the specified cylinder group.
882 */
883static daddr_t
884ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
885{
886 struct m_ext2fs *fs;
887 struct buf *bp;
888 struct ext2mount *ump;
889 int error, start, len;
890 char *ibp, *loc;
891 ipref--; /* to avoid a lot of (ipref - 1) */
892 if (ipref == -1)
893 ipref = 0;
894 fs = ip->i_e2fs;
895 ump = ip->i_ump;
896 if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
897 return (0);
898 EXT2_UNLOCK(ump);
899 error = bread(ip->i_devvp, fsbtodb(fs,
900 fs->e2fs_gd[cg].ext2bgd_i_bitmap),
901 (int)fs->e2fs_bsize, NOCRED, &bp);
902 if (error) {
903 brelse(bp);
904 EXT2_LOCK(ump);
905 return (0);
906 }
907 if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) {
908 /*
909 * Another thread allocated the last i-node in this
910 * group while we were waiting for the buffer.
911 */
912 brelse(bp);
913 EXT2_LOCK(ump);
914 return (0);
915 }
916 ibp = (char *)bp->b_data;
917 if (ipref) {
918 ipref %= fs->e2fs->e2fs_ipg;
919 if (isclr(ibp, ipref))
920 goto gotit;
921 }
922 start = ipref / NBBY;
923 len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
924 loc = memcchr(&ibp[start], 0xff, len);
925 if (loc == NULL) {
926 len = start + 1;
927 start = 0;
928 loc = memcchr(&ibp[start], 0xff, len);
929 if (loc == NULL) {
930 printf("cg = %d, ipref = %lld, fs = %s\n",
931 cg, (long long)ipref, fs->e2fs_fsmnt);
932 panic("ext2fs_nodealloccg: map corrupted");
933 /* NOTREACHED */
934 }
935 }
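	/*
	 * memcchr() found the first byte with a clear bit (a free inode);
	 * ffs(~*loc) locates the lowest clear bit within it, yielding the
	 * 0-based bitmap index.
	 */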
936 ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
937gotit:
938 setbit(ibp, ipref);
939 EXT2_LOCK(ump);
940 fs->e2fs_gd[cg].ext2bgd_nifree--;
941 fs->e2fs->e2fs_ficount--;
942 fs->e2fs_fmod = 1;
943 if ((mode & IFMT) == IFDIR) {
944 fs->e2fs_gd[cg].ext2bgd_ndirs++;
945 fs->e2fs_total_dir++;
946 }
947 EXT2_UNLOCK(ump);
948 bdwrite(bp);
949 return (cg * fs->e2fs->e2fs_ipg + ipref + 1);
950}
951
952/*
953 * Free a block or fragment.
954 *
955 */
956void
957ext2_blkfree(struct inode *ip, int32_t bno, long size)
958{
959 struct m_ext2fs *fs;
960 struct buf *bp;
961 struct ext2mount *ump;
962 int cg, error;
963 char *bbp;
964
965 fs = ip->i_e2fs;
966 ump = ip->i_ump;
967 cg = dtog(fs, bno);
968 if ((u_int)bno >= fs->e2fs->e2fs_bcount) {
969 printf("bad block %lld, ino %llu\n", (long long)bno,
970 (unsigned long long)ip->i_number);
971 ext2_fserr(fs, ip->i_uid, "bad block");
972 return;
973 }
974 error = bread(ip->i_devvp,
975 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
976 (int)fs->e2fs_bsize, NOCRED, &bp);
977 if (error) {
978 brelse(bp);
979 return;
980 }
981 bbp = (char *)bp->b_data;
982 bno = dtogd(fs, bno);
983 if (isclr(bbp, bno)) {
984 printf("block = %lld, fs = %s\n",
985 (long long)bno, fs->e2fs_fsmnt);
986 panic("ext2_blkfree: freeing free block");
987 }
988 clrbit(bbp, bno);
989 EXT2_LOCK(ump);
990 ext2_clusteracct(fs, bbp, cg, bno, 1);
991 fs->e2fs->e2fs_fbcount++;
992 fs->e2fs_gd[cg].ext2bgd_nbfree++;
993 fs->e2fs_fmod = 1;
994 EXT2_UNLOCK(ump);
995 bdwrite(bp);
996}
997
998/*
999 * Free an inode.
1000 *
1001 */
1002int
1003ext2_vfree(struct vnode *pvp, ino_t ino, int mode)
1004{
1005 struct m_ext2fs *fs;
1006 struct inode *pip;
1007 struct buf *bp;
1008 struct ext2mount *ump;
1009 int error, cg;
1010 char *ibp;
1011
1012 pip = VTOI(pvp);
1013 fs = pip->i_e2fs;
1014 ump = pip->i_ump;
1015 if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount)
1016 panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s",
1017 pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt);
1018
1019 cg = ino_to_cg(fs, ino);
1020 error = bread(pip->i_devvp,
1021 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
1022 (int)fs->e2fs_bsize, NOCRED, &bp);
1023 if (error) {
1024 brelse(bp);
1025 return (0);
1026 }
1027 ibp = (char *)bp->b_data;
1028 ino = (ino - 1) % fs->e2fs->e2fs_ipg;
1029 if (isclr(ibp, ino)) {
1030 printf("ino = %llu, fs = %s\n",
1031 (unsigned long long)ino, fs->e2fs_fsmnt);
1032 if (fs->e2fs_ronly == 0)
1033 panic("ext2_vfree: freeing free inode");
1034 }
1035 clrbit(ibp, ino);
1036 EXT2_LOCK(ump);
1037 fs->e2fs->e2fs_ficount++;
1038 fs->e2fs_gd[cg].ext2bgd_nifree++;
1039 if ((mode & IFMT) == IFDIR) {
1040 fs->e2fs_gd[cg].ext2bgd_ndirs--;
1041 fs->e2fs_total_dir--;
1042 }
1043 fs->e2fs_fmod = 1;
1044 EXT2_UNLOCK(ump);
1045 bdwrite(bp);
1046 return (0);
1047}
1048
1049/*
1050 * Find a block in the specified cylinder group.
1051 *
1052 * It is a panic if a request is made to find a block and none are
1053 * available.
1054 */
1055static daddr_t
1056ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
1057{
1058 char *loc;
1059 int start, len;
1060
1061 /*
1062 * find the fragment by searching through the free block
1063 * map for an appropriate bit pattern
1064 */
1065 if (bpref)
1066 start = dtogd(fs, bpref) / NBBY;
1067 else
1068 start = 0;
1069 len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
1070 loc = memcchr(&bbp[start], 0xff, len);
1071 if (loc == NULL) {
1072 len = start + 1;
1073 start = 0;
1074 loc = memcchr(&bbp[start], 0xff, len);
1075 if (loc == NULL) {
1076 printf("start = %d, len = %d, fs = %s\n",
1077 start, len, fs->e2fs_fsmnt);
1078 panic("ext2_mapsearch: map corrupted");
1079 /* NOTREACHED */
1080 }
1081 }
1082 return ((loc - bbp) * NBBY + ffs(~*loc) - 1);
1083}
1084
1085/*
1086 * Fserr prints the name of a file system with an error diagnostic.
1087 *
1088 * The form of the error message is:
1089 * fs: error message
1090 */
1091static void
1092ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp)
1093{
1094
1095 log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp);
1096}
1097
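/*
 * Return non-zero if block group `i' carries a backup copy of the
 * superblock and group descriptors.  With the ext2 sparse superblock
 * layout these are groups 0 and 1 and every group whose number is a
 * power of 3, 5 or 7, e.g. 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, ...
 */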
1098int
1099cg_has_sb(int i)
1100{
1101 int a3, a5, a7;
1102
1103 if (i == 0 || i == 1)
1104 return 1;
1105 for (a3 = 3, a5 = 5, a7 = 7;
1106 a3 <= i || a5 <= i || a7 <= i;
1107 a3 *= 3, a5 *= 5, a7 *= 7)
1108 if (i == a3 || i == a5 || i == a7)
1109 return 1;
1110 return 0;
1111}