ext2_alloc.c (251809) -> ext2_alloc.c (252103)
1/*-
2 * modified for Lites 1.1
3 *
4 * Aug 1995, Godmar Back (gback@cs.utah.edu)
5 * University of Utah, Department of Computer Science
6 */
7/*-
8 * Copyright (c) 1982, 1986, 1989, 1993
9 * The Regents of the University of California. All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94
36 * $FreeBSD: head/sys/fs/ext2fs/ext2_alloc.c 251809 2013-06-16 16:10:45Z pfg $
36 * $FreeBSD: head/sys/fs/ext2fs/ext2_alloc.c 252103 2013-06-23 02:44:42Z pfg $
37 */
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/conf.h>
42#include <sys/vnode.h>
43#include <sys/stat.h>
44#include <sys/mount.h>
45#include <sys/sysctl.h>
46#include <sys/syslog.h>
47#include <sys/buf.h>
48
49#include <fs/ext2fs/fs.h>
50#include <fs/ext2fs/inode.h>
51#include <fs/ext2fs/ext2_mount.h>
52#include <fs/ext2fs/ext2fs.h>
53#include <fs/ext2fs/ext2_extern.h>
54
55static daddr_t ext2_alloccg(struct inode *, int, daddr_t, int);
56static daddr_t ext2_clusteralloc(struct inode *, int, daddr_t, int);
57static u_long ext2_dirpref(struct inode *);
58static void ext2_fserr(struct m_ext2fs *, uid_t, char *);
59static u_long ext2_hashalloc(struct inode *, int, long, int,
60 daddr_t (*)(struct inode *, int, daddr_t,
61 int));
62static daddr_t ext2_nodealloccg(struct inode *, int, daddr_t, int);
63static daddr_t ext2_mapsearch(struct m_ext2fs *, char *, daddr_t);
64
65/*
66 * Allocate a block in the filesystem.
67 *
68 * A preference may be optionally specified. If a preference is given
69 * the following hierarchy is used to allocate a block:
70 * 1) allocate the requested block.
71 * 2) allocate a rotationally optimal block in the same cylinder.
72 * 3) allocate a block in the same cylinder group.
 73 * 4) quadratically rehash into other cylinder groups, until an
74 * available block is located.
75 * If no block preference is given the following hierarchy is used
76 * to allocate a block:
77 * 1) allocate a block in the cylinder group that contains the
78 * inode for the file.
 79 * 2) quadratically rehash into other cylinder groups, until an
80 * available block is located.
81 */
82int
83ext2_alloc(struct inode *ip, int32_t lbn, int32_t bpref, int size,
84 struct ucred *cred, int32_t *bnp)
85{
86 struct m_ext2fs *fs;
87 struct ext2mount *ump;
88 int32_t bno;
89 int cg;
90 *bnp = 0;
91 fs = ip->i_e2fs;
92 ump = ip->i_ump;
93 mtx_assert(EXT2_MTX(ump), MA_OWNED);
94#ifdef INVARIANTS
95 if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) {
96 vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n",
97 (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt);
98 panic("ext2_alloc: bad size");
99 }
100 if (cred == NOCRED)
101 panic("ext2_alloc: missing credential");
102#endif /* INVARIANTS */
103 if (size == fs->e2fs_bsize && fs->e2fs->e2fs_fbcount == 0)
104 goto nospace;
105 if (cred->cr_uid != 0 &&
106 fs->e2fs->e2fs_fbcount < fs->e2fs->e2fs_rbcount)
107 goto nospace;
108 if (bpref >= fs->e2fs->e2fs_bcount)
109 bpref = 0;
110 if (bpref == 0)
111 cg = ino_to_cg(fs, ip->i_number);
112 else
113 cg = dtog(fs, bpref);
114 bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize,
115 ext2_alloccg);
116 if (bno > 0) {
117 /* set next_alloc fields as done in block_getblk */
118 ip->i_next_alloc_block = lbn;
119 ip->i_next_alloc_goal = bno;
120
121 ip->i_blocks += btodb(fs->e2fs_bsize);
122 ip->i_flag |= IN_CHANGE | IN_UPDATE;
123 *bnp = bno;
124 return (0);
125 }
126nospace:
127 EXT2_UNLOCK(ump);
128 ext2_fserr(fs, cred->cr_uid, "filesystem full");
129 uprintf("\n%s: write failed, filesystem is full\n", fs->e2fs_fsmnt);
130 return (ENOSPC);
131}
132
133/*
134 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
135 *
136 * The vnode and an array of buffer pointers for a range of sequential
137 * logical blocks to be made contiguous is given. The allocator attempts
138 * to find a range of sequential blocks starting as close as possible to
139 * an fs_rotdelay offset from the end of the allocation for the logical
140 * block immediately preceding the current range. If successful, the
141 * physical block numbers in the buffer pointers and in the inode are
142 * changed to reflect the new allocation. If unsuccessful, the allocation
143 * is left unchanged. The success in doing the reallocation is returned.
144 * Note that the error return is not reflected back to the user. Rather
145 * the previous block allocation will be used.
146 */
147
148static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem");
149
150static int doasyncfree = 0;
151SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
 152 "Use asynchronous writes to update block pointers when freeing blocks");
153
154static int doreallocblks = 0;
155SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");
156
157int
158ext2_reallocblks(struct vop_reallocblks_args *ap)
159{
160 struct m_ext2fs *fs;
161 struct inode *ip;
162 struct vnode *vp;
163 struct buf *sbp, *ebp;
164 uint32_t *bap, *sbap, *ebap = 0;
165 struct ext2mount *ump;
166 struct cluster_save *buflist;
167 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
168 int32_t start_lbn, end_lbn, soff, newblk, blkno;
168 e2fs_lbn_t start_lbn, end_lbn;
169 int32_t soff, newblk, blkno;
169 int i, len, start_lvl, end_lvl, pref, ssize;
170
171 if (doreallocblks == 0)
172 return (ENOSPC);
173
174 vp = ap->a_vp;
175 ip = VTOI(vp);
176 fs = ip->i_e2fs;
177 ump = ip->i_ump;
178
179 if (fs->e2fs_contigsumsize <= 0)
180 return (ENOSPC);
181
182 buflist = ap->a_buflist;
183 len = buflist->bs_nchildren;
184 start_lbn = buflist->bs_children[0]->b_lblkno;
185 end_lbn = start_lbn + len - 1;
186#ifdef INVARIANTS
187 for (i = 1; i < len; i++)
188 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
189 panic("ext2_reallocblks: non-cluster");
190#endif
191 /*
192 * If the cluster crosses the boundary for the first indirect
193 * block, leave space for the indirect block. Indirect blocks
194 * are initially laid out in a position after the last direct
195 * block. Block reallocation would usually destroy locality by
196 * moving the indirect block out of the way to make room for
197 * data blocks if we didn't compensate here. We should also do
198 * this for other indirect block boundaries, but it is only
199 * important for the first one.
200 */
201 if (start_lbn < NDADDR && end_lbn >= NDADDR)
202 return (ENOSPC);
203 /*
204 * If the latest allocation is in a new cylinder group, assume that
205 * the filesystem has decided to move and do not force it back to
206 * the previous cylinder group.
207 */
208 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
209 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
210 return (ENOSPC);
211 if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
212 ext2_getlbns(vp, end_lbn, end_ap, &end_lvl))
213 return (ENOSPC);
214 /*
215 * Get the starting offset and block map for the first block.
216 */
217 if (start_lvl == 0) {
218 sbap = &ip->i_db[0];
219 soff = start_lbn;
220 } else {
221 idp = &start_ap[start_lvl - 1];
222 if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) {
223 brelse(sbp);
224 return (ENOSPC);
225 }
226 sbap = (u_int *)sbp->b_data;
227 soff = idp->in_off;
228 }
229 /*
230 * If the block range spans two block maps, get the second map.
231 */
232 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
233 ssize = len;
234 } else {
235#ifdef INVARIANTS
236 if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
237 panic("ext2_reallocblks: start == end");
238#endif
239 ssize = len - (idp->in_off + 1);
240 if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp))
241 goto fail;
242 ebap = (u_int *)ebp->b_data;
243 }
244 /*
245 * Find the preferred location for the cluster.
246 */
247 EXT2_LOCK(ump);
248 pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0);
249 /*
250 * Search the block map looking for an allocation of the desired size.
251 */
252 if ((newblk = (int32_t)ext2_hashalloc(ip, dtog(fs, pref), pref,
253 len, ext2_clusteralloc)) == 0){
254 EXT2_UNLOCK(ump);
255 goto fail;
256 }
257 /*
258 * We have found a new contiguous block.
259 *
260 * First we have to replace the old block pointers with the new
261 * block pointers in the inode and indirect blocks associated
262 * with the file.
263 */
264#ifdef DEBUG
265 printf("realloc: ino %d, lbns %jd-%jd\n\told:", ip->i_number,
266 (intmax_t)start_lbn, (intmax_t)end_lbn);
267#endif /* DEBUG */
268 blkno = newblk;
269 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
270 if (i == ssize) {
271 bap = ebap;
272 soff = -i;
273 }
274#ifdef INVARIANTS
275 if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
276 panic("ext2_reallocblks: alloc mismatch");
277#endif
278#ifdef DEBUG
279 printf(" %d,", *bap);
280#endif /* DEBUG */
281 *bap++ = blkno;
282 }
283 /*
284 * Next we must write out the modified inode and indirect blocks.
285 * For strict correctness, the writes should be synchronous since
 286 * the old block values may have been written to disk. In practice
287 * they are almost never written, but if we are concerned about
288 * strict correctness, the `doasyncfree' flag should be set to zero.
289 *
290 * The test on `doasyncfree' should be changed to test a flag
291 * that shows whether the associated buffers and inodes have
292 * been written. The flag should be set when the cluster is
293 * started and cleared whenever the buffer or inode is flushed.
294 * We can then check below to see if it is set, and do the
295 * synchronous write only when it has been cleared.
296 */
297 if (sbap != &ip->i_db[0]) {
298 if (doasyncfree)
299 bdwrite(sbp);
300 else
301 bwrite(sbp);
302 } else {
303 ip->i_flag |= IN_CHANGE | IN_UPDATE;
304 if (!doasyncfree)
305 ext2_update(vp, 1);
306 }
307 if (ssize < len) {
308 if (doasyncfree)
309 bdwrite(ebp);
310 else
311 bwrite(ebp);
312 }
313 /*
314 * Last, free the old blocks and assign the new blocks to the buffers.
315 */
316#ifdef DEBUG
317 printf("\n\tnew:");
318#endif /* DEBUG */
319 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
320 ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
321 fs->e2fs_bsize);
322 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
323#ifdef DEBUG
324 printf(" %d,", blkno);
325#endif /* DEBUG */
326 }
327#ifdef DEBUG
328 printf("\n");
329#endif /* DEBUG */
330 return (0);
331
332fail:
333 if (ssize < len)
334 brelse(ebp);
335 if (sbap != &ip->i_db[0])
336 brelse(sbp);
337 return (ENOSPC);
338}
339
340/*
341 * Allocate an inode in the filesystem.
342 *
343 */
344int
345ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
346{
347 struct timespec ts;
348 struct inode *pip;
349 struct m_ext2fs *fs;
350 struct inode *ip;
351 struct ext2mount *ump;
352 ino_t ino, ipref;
353 int i, error, cg;
354
355 *vpp = NULL;
356 pip = VTOI(pvp);
357 fs = pip->i_e2fs;
358 ump = pip->i_ump;
359
360 EXT2_LOCK(ump);
361 if (fs->e2fs->e2fs_ficount == 0)
362 goto noinodes;
363 /*
364 * If it is a directory then obtain a cylinder group based on
365 * ext2_dirpref else obtain it using ino_to_cg. The preferred inode is
366 * always the next inode.
367 */
368 if ((mode & IFMT) == IFDIR) {
369 cg = ext2_dirpref(pip);
370 if (fs->e2fs_contigdirs[cg] < 255)
371 fs->e2fs_contigdirs[cg]++;
372 } else {
373 cg = ino_to_cg(fs, pip->i_number);
374 if (fs->e2fs_contigdirs[cg] > 0)
375 fs->e2fs_contigdirs[cg]--;
376 }
377 ipref = cg * fs->e2fs->e2fs_ipg + 1;
378 ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg);
379
380 if (ino == 0)
381 goto noinodes;
382 error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
383 if (error) {
384 ext2_vfree(pvp, ino, mode);
385 return (error);
386 }
387 ip = VTOI(*vpp);
388
389 /*
 390 * The question is whether using VGET was such a good idea at all:
391 * Linux doesn't read the old inode in when it is allocating a
392 * new one. I will set at least i_size and i_blocks to zero.
393 */
394 ip->i_size = 0;
395 ip->i_blocks = 0;
396 ip->i_mode = 0;
397 ip->i_flags = 0;
398 /* now we want to make sure that the block pointers are zeroed out */
399 for (i = 0; i < NDADDR; i++)
400 ip->i_db[i] = 0;
401 for (i = 0; i < NIADDR; i++)
402 ip->i_ib[i] = 0;
403
404 /*
405 * Set up a new generation number for this inode.
406 * XXX check if this makes sense in ext2
407 */
408 if (ip->i_gen == 0 || ++ip->i_gen == 0)
409 ip->i_gen = random() / 2 + 1;
410
411 vfs_timestamp(&ts);
412 ip->i_birthtime = ts.tv_sec;
413 ip->i_birthnsec = ts.tv_nsec;
414
415/*
416printf("ext2_valloc: allocated inode %d\n", ino);
417*/
418 return (0);
419noinodes:
420 EXT2_UNLOCK(ump);
421 ext2_fserr(fs, cred->cr_uid, "out of inodes");
422 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt);
423 return (ENOSPC);
424}
425
426/*
 427 * Find a cylinder group to place a directory.
428 *
429 * The policy implemented by this algorithm is to allocate a
430 * directory inode in the same cylinder group as its parent
 431 * directory, but also to reserve space for its files' inodes
432 * and data. Restrict the number of directories which may be
433 * allocated one after another in the same cylinder group
434 * without intervening allocation of files.
435 *
436 * If we allocate a first level directory then force allocation
437 * in another cylinder group.
438 *
439 */
440static u_long
441ext2_dirpref(struct inode *pip)
442{
443 struct m_ext2fs *fs;
444 int cg, prefcg, dirsize, cgsize;
445 u_int avgifree, avgbfree, avgndir, curdirsize;
446 u_int minifree, minbfree, maxndir;
447 u_int mincg, minndir;
448 u_int maxcontigdirs;
449
450 mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED);
451 fs = pip->i_e2fs;
452
453 avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount;
454 avgbfree = fs->e2fs->e2fs_fbcount / fs->e2fs_gcount;
455 avgndir = fs->e2fs_total_dir / fs->e2fs_gcount;
456
457 /*
458 * Force allocation in another cg if creating a first level dir.
459 */
460 ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref");
461 if (ITOV(pip)->v_vflag & VV_ROOT) {
462 prefcg = arc4random() % fs->e2fs_gcount;
463 mincg = prefcg;
464 minndir = fs->e2fs_ipg;
465 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
466 if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
467 fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
468 fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
469 mincg = cg;
470 minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
471 }
472 for (cg = 0; cg < prefcg; cg++)
473 if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
474 fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
475 fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
476 mincg = cg;
477 minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
478 }
479
480 return (mincg);
481 }
482
483 /*
 484 * Count various limits which are used for
485 * optimal allocation of a directory inode.
486 */
487 maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg);
488 minifree = avgifree - avgifree / 4;
489 if (minifree < 1)
490 minifree = 1;
491 minbfree = avgbfree - avgbfree / 4;
492 if (minbfree < 1)
493 minbfree = 1;
494 cgsize = fs->e2fs_fsize * fs->e2fs_fpg;
495 dirsize = AVGDIRSIZE;
496 curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0;
497 if (dirsize < curdirsize)
498 dirsize = curdirsize;
499 if (dirsize <= 0)
500 maxcontigdirs = 0; /* dirsize overflowed */
501 else
502 maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255);
503 maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR);
504 if (maxcontigdirs == 0)
505 maxcontigdirs = 1;
506
507 /*
508 * Limit number of dirs in one cg and reserve space for
509 * regular files, but only if we have no deficit in
510 * inodes or space.
511 */
512 prefcg = ino_to_cg(fs, pip->i_number);
513 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
514 if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
515 fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
516 fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
517 if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
518 return (cg);
519 }
520 for (cg = 0; cg < prefcg; cg++)
521 if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
522 fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
523 fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
524 if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
525 return (cg);
526 }
527 /*
528 * This is a backstop when we have deficit in space.
529 */
530 for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
531 if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
532 return (cg);
533 for (cg = 0; cg < prefcg; cg++)
534 if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
535 break;
536 return (cg);
537}
538
539/*
540 * Select the desired position for the next block in a file.
541 *
542 * we try to mimic what Remy does in inode_getblk/block_getblk
543 *
544 * we note: blocknr == 0 means that we're about to allocate either
545 * a direct block or a pointer block at the first level of indirection
546 * (In other words, stuff that will go in i_db[] or i_ib[])
547 *
548 * blocknr != 0 means that we're allocating a block that is none
549 * of the above. Then, blocknr tells us the number of the block
550 * that will hold the pointer
551 */
552int32_t
553ext2_blkpref(struct inode *ip, int32_t lbn, int indx, int32_t *bap,
554ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, int32_t *bap,
554 int32_t blocknr)
555{
556 int tmp;
557 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
558
559 /* if the next block is actually what we thought it is,
560 then set the goal to what we thought it should be
561 */
562 if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0)
563 return ip->i_next_alloc_goal;
564
565 /* now check whether we were provided with an array that basically
 566 tells us previous blocks to which we want to stay close by
567 */
568 if (bap)
569 for (tmp = indx - 1; tmp >= 0; tmp--)
570 if (bap[tmp])
571 return bap[tmp];
572
573 /* else let's fall back to the blocknr, or, if there is none,
574 follow the rule that a block should be allocated near its inode
575 */
576 return blocknr ? blocknr :
577 (int32_t)(ip->i_block_group *
578 EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
579 ip->i_e2fs->e2fs->e2fs_first_dblock;
580}
581
582/*
583 * Implement the cylinder overflow algorithm.
584 *
585 * The policy implemented by this algorithm is:
586 * 1) allocate the block in its requested cylinder group.
 587 * 2) quadratically rehash on the cylinder group number.
588 * 3) brute force search for a free block.
589 */
590static u_long
591ext2_hashalloc(struct inode *ip, int cg, long pref, int size,
592 daddr_t (*allocator)(struct inode *, int, daddr_t, int))
593{
594 struct m_ext2fs *fs;
595 ino_t result;
596 int i, icg = cg;
597
598 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
599 fs = ip->i_e2fs;
600 /*
601 * 1: preferred cylinder group
602 */
603 result = (*allocator)(ip, cg, pref, size);
604 if (result)
605 return (result);
606 /*
607 * 2: quadratic rehash
608 */
609 for (i = 1; i < fs->e2fs_gcount; i *= 2) {
610 cg += i;
611 if (cg >= fs->e2fs_gcount)
612 cg -= fs->e2fs_gcount;
613 result = (*allocator)(ip, cg, 0, size);
614 if (result)
615 return (result);
616 }
617 /*
618 * 3: brute force search
619 * Note that we start at i == 2, since 0 was checked initially,
620 * and 1 is always checked in the quadratic rehash.
621 */
622 cg = (icg + 2) % fs->e2fs_gcount;
623 for (i = 2; i < fs->e2fs_gcount; i++) {
624 result = (*allocator)(ip, cg, 0, size);
625 if (result)
626 return (result);
627 cg++;
628 if (cg == fs->e2fs_gcount)
629 cg = 0;
630 }
631 return (0);
632}
633
634/*
635 * Determine whether a block can be allocated.
636 *
637 * Check to see if a block of the appropriate size is available,
638 * and if it is, allocate it.
639 */
640static daddr_t
641ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
642{
643 struct m_ext2fs *fs;
644 struct buf *bp;
645 struct ext2mount *ump;
646 daddr_t bno, runstart, runlen;
647 int bit, loc, end, error, start;
648 char *bbp;
649 /* XXX ondisk32 */
650 fs = ip->i_e2fs;
651 ump = ip->i_ump;
652 if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
653 return (0);
654 EXT2_UNLOCK(ump);
655 error = bread(ip->i_devvp, fsbtodb(fs,
656 fs->e2fs_gd[cg].ext2bgd_b_bitmap),
657 (int)fs->e2fs_bsize, NOCRED, &bp);
658 if (error) {
659 brelse(bp);
660 EXT2_LOCK(ump);
661 return (0);
662 }
663 if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
664 /*
665 * Another thread allocated the last block in this
666 * group while we were waiting for the buffer.
667 */
668 brelse(bp);
669 EXT2_LOCK(ump);
670 return (0);
671 }
672 bbp = (char *)bp->b_data;
673
674 if (dtog(fs, bpref) != cg)
675 bpref = 0;
676 if (bpref != 0) {
677 bpref = dtogd(fs, bpref);
678 /*
679 * if the requested block is available, use it
680 */
681 if (isclr(bbp, bpref)) {
682 bno = bpref;
683 goto gotit;
684 }
685 }
686 /*
687 * no blocks in the requested cylinder, so take next
688 * available one in this cylinder group.
 689 * first try to get 8 contiguous blocks, then fall back to a single
690 * block.
691 */
692 if (bpref)
693 start = dtogd(fs, bpref) / NBBY;
694 else
695 start = 0;
696 end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
697retry:
698 runlen = 0;
699 runstart = 0;
700 for (loc = start; loc < end; loc++) {
701 if (bbp[loc] == (char)0xff) {
702 runlen = 0;
703 continue;
704 }
705
706 /* Start of a run, find the number of high clear bits. */
707 if (runlen == 0) {
708 bit = fls(bbp[loc]);
709 runlen = NBBY - bit;
710 runstart = loc * NBBY + bit;
711 } else if (bbp[loc] == 0) {
712 /* Continue a run. */
713 runlen += NBBY;
714 } else {
715 /*
716 * Finish the current run. If it isn't long
717 * enough, start a new one.
718 */
719 bit = ffs(bbp[loc]) - 1;
720 runlen += bit;
721 if (runlen >= 8) {
722 bno = runstart;
723 goto gotit;
724 }
725
726 /* Run was too short, start a new one. */
727 bit = fls(bbp[loc]);
728 runlen = NBBY - bit;
729 runstart = loc * NBBY + bit;
730 }
731
732 /* If the current run is long enough, use it. */
733 if (runlen >= 8) {
734 bno = runstart;
735 goto gotit;
736 }
737 }
738 if (start != 0) {
739 end = start;
740 start = 0;
741 goto retry;
742 }
743
744 bno = ext2_mapsearch(fs, bbp, bpref);
745 if (bno < 0){
746 brelse(bp);
747 EXT2_LOCK(ump);
748 return (0);
749 }
750gotit:
751#ifdef INVARIANTS
752 if (isset(bbp, bno)) {
753 printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
754 cg, (intmax_t)bno, fs->e2fs_fsmnt);
755 panic("ext2fs_alloccg: dup alloc");
756 }
757#endif
758 setbit(bbp, bno);
759 EXT2_LOCK(ump);
760 ext2_clusteracct(fs, bbp, cg, bno, -1);
761 fs->e2fs->e2fs_fbcount--;
762 fs->e2fs_gd[cg].ext2bgd_nbfree--;
763 fs->e2fs_fmod = 1;
764 EXT2_UNLOCK(ump);
765 bdwrite(bp);
766 return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
767}
768
769/*
770 * Determine whether a cluster can be allocated.
771 */
772static daddr_t
773ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
774{
775 struct m_ext2fs *fs;
776 struct ext2mount *ump;
777 struct buf *bp;
778 char *bbp;
779 int bit, error, got, i, loc, run;
780 int32_t *lp;
781 daddr_t bno;
782
783 fs = ip->i_e2fs;
784 ump = ip->i_ump;
785
786 if (fs->e2fs_maxcluster[cg] < len)
787 return (0);
788
789 EXT2_UNLOCK(ump);
790 error = bread(ip->i_devvp,
791 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
792 (int)fs->e2fs_bsize, NOCRED, &bp);
793 if (error)
794 goto fail_lock;
795
796 bbp = (char *)bp->b_data;
797 EXT2_LOCK(ump);
798 /*
799 * Check to see if a cluster of the needed size (or bigger) is
800 * available in this cylinder group.
801 */
802 lp = &fs->e2fs_clustersum[cg].cs_sum[len];
803 for (i = len; i <= fs->e2fs_contigsumsize; i++)
804 if (*lp++ > 0)
805 break;
806 if (i > fs->e2fs_contigsumsize) {
807 /*
808 * Update the cluster summary information to reflect
809 * the true maximum-sized cluster so that future cluster
810 * allocation requests can avoid reading the bitmap only
811 * to find no cluster.
812 */
813 lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
814 for (i = len - 1; i > 0; i--)
815 if (*lp-- > 0)
816 break;
817 fs->e2fs_maxcluster[cg] = i;
818 goto fail;
819 }
820 EXT2_UNLOCK(ump);
821
822 /* Search the bitmap to find a big enough cluster like in FFS. */
823 if (dtog(fs, bpref) != cg)
824 bpref = 0;
825 if (bpref != 0)
826 bpref = dtogd(fs, bpref);
827 loc = bpref / NBBY;
828 bit = 1 << (bpref % NBBY);
829 for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
830 if ((bbp[loc] & bit) != 0)
831 run = 0;
832 else {
833 run++;
834 if (run == len)
835 break;
836 }
837 if ((got & (NBBY - 1)) != (NBBY - 1))
838 bit <<= 1;
839 else {
840 loc++;
841 bit = 1;
842 }
843 }
844
845 if (got >= fs->e2fs->e2fs_fpg)
846 goto fail_lock;
847
848 /* Allocate the cluster that we found. */
849 for (i = 1; i < len; i++)
850 if (!isclr(bbp, got - run + i))
851 panic("ext2_clusteralloc: map mismatch");
852
853 bno = got - run + 1;
854 if (bno >= fs->e2fs->e2fs_fpg)
855 panic("ext2_clusteralloc: allocated out of group");
856
857 EXT2_LOCK(ump);
858 for (i = 0; i < len; i += fs->e2fs_fpb) {
859 setbit(bbp, bno + i);
860 ext2_clusteracct(fs, bbp, cg, bno + i, -1);
861 fs->e2fs->e2fs_fbcount--;
862 fs->e2fs_gd[cg].ext2bgd_nbfree--;
863 }
864 fs->e2fs_fmod = 1;
865 EXT2_UNLOCK(ump);
866
867 bdwrite(bp);
868 return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
869
870fail_lock:
871 EXT2_LOCK(ump);
872fail:
873 brelse(bp);
874 return (0);
875}
876
877/*
878 * Determine whether an inode can be allocated.
879 *
880 * Check to see if an inode is available, and if it is,
 881 * allocate it in the specified cylinder group.
882 */
883static daddr_t
884ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
885{
886 struct m_ext2fs *fs;
887 struct buf *bp;
888 struct ext2mount *ump;
889 int error, start, len;
890 char *ibp, *loc;
891 ipref--; /* to avoid a lot of (ipref -1) */
892 if (ipref == -1)
893 ipref = 0;
894 fs = ip->i_e2fs;
895 ump = ip->i_ump;
896 if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
897 return (0);
898 EXT2_UNLOCK(ump);
899 error = bread(ip->i_devvp, fsbtodb(fs,
900 fs->e2fs_gd[cg].ext2bgd_i_bitmap),
901 (int)fs->e2fs_bsize, NOCRED, &bp);
902 if (error) {
903 brelse(bp);
904 EXT2_LOCK(ump);
905 return (0);
906 }
907 if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) {
908 /*
909 * Another thread allocated the last i-node in this
910 * group while we were waiting for the buffer.
911 */
912 brelse(bp);
913 EXT2_LOCK(ump);
914 return (0);
915 }
916 ibp = (char *)bp->b_data;
917 if (ipref) {
918 ipref %= fs->e2fs->e2fs_ipg;
919 if (isclr(ibp, ipref))
920 goto gotit;
921 }
922 start = ipref / NBBY;
923 len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
924 loc = memcchr(&ibp[start], 0xff, len);
925 if (loc == NULL) {
926 len = start + 1;
927 start = 0;
928 loc = memcchr(&ibp[start], 0xff, len);
929 if (loc == NULL) {
930 printf("cg = %d, ipref = %lld, fs = %s\n",
931 cg, (long long)ipref, fs->e2fs_fsmnt);
932 panic("ext2fs_nodealloccg: map corrupted");
933 /* NOTREACHED */
934 }
935 }
936 ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
937gotit:
938 setbit(ibp, ipref);
939 EXT2_LOCK(ump);
940 fs->e2fs_gd[cg].ext2bgd_nifree--;
941 fs->e2fs->e2fs_ficount--;
942 fs->e2fs_fmod = 1;
943 if ((mode & IFMT) == IFDIR) {
944 fs->e2fs_gd[cg].ext2bgd_ndirs++;
945 fs->e2fs_total_dir++;
946 }
947 EXT2_UNLOCK(ump);
948 bdwrite(bp);
949 return (cg * fs->e2fs->e2fs_ipg + ipref +1);
950}
951
952/*
953 * Free a block or fragment.
954 *
955 */
956void
957ext2_blkfree(struct inode *ip, int32_t bno, long size)
958{
959 struct m_ext2fs *fs;
960 struct buf *bp;
961 struct ext2mount *ump;
962 int cg, error;
963 char *bbp;
964
965 fs = ip->i_e2fs;
966 ump = ip->i_ump;
967 cg = dtog(fs, bno);
968 if ((u_int)bno >= fs->e2fs->e2fs_bcount) {
969 printf("bad block %lld, ino %llu\n", (long long)bno,
970 (unsigned long long)ip->i_number);
971 ext2_fserr(fs, ip->i_uid, "bad block");
972 return;
973 }
974 error = bread(ip->i_devvp,
975 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
976 (int)fs->e2fs_bsize, NOCRED, &bp);
977 if (error) {
978 brelse(bp);
979 return;
980 }
981 bbp = (char *)bp->b_data;
982 bno = dtogd(fs, bno);
983 if (isclr(bbp, bno)) {
984 printf("block = %lld, fs = %s\n",
985 (long long)bno, fs->e2fs_fsmnt);
986 panic("ext2_blkfree: freeing free block");
987 }
988 clrbit(bbp, bno);
989 EXT2_LOCK(ump);
990 ext2_clusteracct(fs, bbp, cg, bno, 1);
991 fs->e2fs->e2fs_fbcount++;
992 fs->e2fs_gd[cg].ext2bgd_nbfree++;
993 fs->e2fs_fmod = 1;
994 EXT2_UNLOCK(ump);
995 bdwrite(bp);
996}
997
998/*
999 * Free an inode.
1000 *
1001 */
1002int
1003ext2_vfree(struct vnode *pvp, ino_t ino, int mode)
1004{
1005 struct m_ext2fs *fs;
1006 struct inode *pip;
1007 struct buf *bp;
1008 struct ext2mount *ump;
1009 int error, cg;
1010 char * ibp;
1011
1012 pip = VTOI(pvp);
1013 fs = pip->i_e2fs;
1014 ump = pip->i_ump;
1015 if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount)
1016 panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s",
1017 pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt);
1018
1019 cg = ino_to_cg(fs, ino);
1020 error = bread(pip->i_devvp,
1021 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
1022 (int)fs->e2fs_bsize, NOCRED, &bp);
1023 if (error) {
1024 brelse(bp);
1025 return (0);
1026 }
1027 ibp = (char *)bp->b_data;
1028 ino = (ino - 1) % fs->e2fs->e2fs_ipg;
1029 if (isclr(ibp, ino)) {
1030 printf("ino = %llu, fs = %s\n",
1031 (unsigned long long)ino, fs->e2fs_fsmnt);
1032 if (fs->e2fs_ronly == 0)
1033 panic("ext2_vfree: freeing free inode");
1034 }
1035 clrbit(ibp, ino);
1036 EXT2_LOCK(ump);
1037 fs->e2fs->e2fs_ficount++;
1038 fs->e2fs_gd[cg].ext2bgd_nifree++;
1039 if ((mode & IFMT) == IFDIR) {
1040 fs->e2fs_gd[cg].ext2bgd_ndirs--;
1041 fs->e2fs_total_dir--;
1042 }
1043 fs->e2fs_fmod = 1;
1044 EXT2_UNLOCK(ump);
1045 bdwrite(bp);
1046 return (0);
1047}
1048
1049/*
1050 * Find a block in the specified cylinder group.
1051 *
 1052 * It is a panic if a request is made to find a block when none are
1053 * available.
1054 */
1055static daddr_t
1056ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
1057{
1058 char *loc;
1059 int start, len;
1060
1061 /*
1062 * find the fragment by searching through the free block
1063 * map for an appropriate bit pattern
1064 */
1065 if (bpref)
1066 start = dtogd(fs, bpref) / NBBY;
1067 else
1068 start = 0;
1069 len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
1070 loc = memcchr(&bbp[start], 0xff, len);
1071 if (loc == NULL) {
1072 len = start + 1;
1073 start = 0;
1074 loc = memcchr(&bbp[start], 0xff, len);
1075 if (loc == NULL) {
1076 printf("start = %d, len = %d, fs = %s\n",
1077 start, len, fs->e2fs_fsmnt);
1078 panic("ext2_mapsearch: map corrupted");
1079 /* NOTREACHED */
1080 }
1081 }
1082 return ((loc - bbp) * NBBY + ffs(~*loc) - 1);
1083}
1084
1085/*
1086 * Fserr prints the name of a filesystem with an error diagnostic.
1087 *
1088 * The form of the error message is:
1089 * fs: error message
1090 */
1091static void
1092ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp)
1093{
1094
1095 log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp);
1096}
1097
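/*
 * Return 1 if cylinder group number i holds a backup copy of the
 * superblock: group 0, group 1, or any group whose number is a power
 * of 3, 5 or 7 (the ext2 sparse_super backup layout); otherwise 0.
 */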
1098int
1099cg_has_sb(int i)
1100{
1101 int a3, a5, a7;
1102
1103 if (i == 0 || i == 1)
1104 return 1;
1105 for (a3 = 3, a5 = 5, a7 = 7;
1106 a3 <= i || a5 <= i || a7 <= i;
1107 a3 *= 3, a5 *= 5, a7 *= 7)
1108 if (i == a3 || i == a5 || i == a7)
1109 return 1;
1110 return 0;
1111}
555 int32_t blocknr)
556{
557 int tmp;
558 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
559
560 /* if the next block is actually what we thought it is,
561 then set the goal to what we thought it should be
562 */
563 if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0)
564 return ip->i_next_alloc_goal;
565
566 /* now check whether we were provided with an array that basically
567 tells us previous blocks to which we want to stay closeby
568 */
569 if (bap)
570 for (tmp = indx - 1; tmp >= 0; tmp--)
571 if (bap[tmp])
572 return bap[tmp];
573
574 /* else let's fall back to the blocknr, or, if there is none,
575 follow the rule that a block should be allocated near its inode
576 */
577 return blocknr ? blocknr :
578 (int32_t)(ip->i_block_group *
579 EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
580 ip->i_e2fs->e2fs->e2fs_first_dblock;
581}
582
583/*
584 * Implement the cylinder overflow algorithm.
585 *
586 * The policy implemented by this algorithm is:
587 * 1) allocate the block in its requested cylinder group.
588 * 2) quadradically rehash on the cylinder group number.
589 * 3) brute force search for a free block.
590 */
591static u_long
592ext2_hashalloc(struct inode *ip, int cg, long pref, int size,
593 daddr_t (*allocator)(struct inode *, int, daddr_t, int))
594{
595 struct m_ext2fs *fs;
596 ino_t result;
597 int i, icg = cg;
598
599 mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
600 fs = ip->i_e2fs;
601 /*
602 * 1: preferred cylinder group
603 */
604 result = (*allocator)(ip, cg, pref, size);
605 if (result)
606 return (result);
607 /*
608 * 2: quadratic rehash
609 */
610 for (i = 1; i < fs->e2fs_gcount; i *= 2) {
611 cg += i;
612 if (cg >= fs->e2fs_gcount)
613 cg -= fs->e2fs_gcount;
614 result = (*allocator)(ip, cg, 0, size);
615 if (result)
616 return (result);
617 }
618 /*
619 * 3: brute force search
620 * Note that we start at i == 2, since 0 was checked initially,
621 * and 1 is always checked in the quadratic rehash.
622 */
623 cg = (icg + 2) % fs->e2fs_gcount;
624 for (i = 2; i < fs->e2fs_gcount; i++) {
625 result = (*allocator)(ip, cg, 0, size);
626 if (result)
627 return (result);
628 cg++;
629 if (cg == fs->e2fs_gcount)
630 cg = 0;
631 }
632 return (0);
633}
634
635/*
636 * Determine whether a block can be allocated.
637 *
638 * Check to see if a block of the appropriate size is available,
639 * and if it is, allocate it.
640 */
641static daddr_t
642ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
643{
644 struct m_ext2fs *fs;
645 struct buf *bp;
646 struct ext2mount *ump;
647 daddr_t bno, runstart, runlen;
648 int bit, loc, end, error, start;
649 char *bbp;
650 /* XXX ondisk32 */
651 fs = ip->i_e2fs;
652 ump = ip->i_ump;
653 if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
654 return (0);
655 EXT2_UNLOCK(ump);
656 error = bread(ip->i_devvp, fsbtodb(fs,
657 fs->e2fs_gd[cg].ext2bgd_b_bitmap),
658 (int)fs->e2fs_bsize, NOCRED, &bp);
659 if (error) {
660 brelse(bp);
661 EXT2_LOCK(ump);
662 return (0);
663 }
664 if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
665 /*
666 * Another thread allocated the last block in this
667 * group while we were waiting for the buffer.
668 */
669 brelse(bp);
670 EXT2_LOCK(ump);
671 return (0);
672 }
673 bbp = (char *)bp->b_data;
674
675 if (dtog(fs, bpref) != cg)
676 bpref = 0;
677 if (bpref != 0) {
678 bpref = dtogd(fs, bpref);
679 /*
680 * if the requested block is available, use it
681 */
682 if (isclr(bbp, bpref)) {
683 bno = bpref;
684 goto gotit;
685 }
686 }
687 /*
688 * no blocks in the requested cylinder, so take next
689 * available one in this cylinder group.
690 * first try to get 8 contigous blocks, then fall back to a single
691 * block.
692 */
693 if (bpref)
694 start = dtogd(fs, bpref) / NBBY;
695 else
696 start = 0;
697 end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
698retry:
699 runlen = 0;
700 runstart = 0;
701 for (loc = start; loc < end; loc++) {
702 if (bbp[loc] == (char)0xff) {
703 runlen = 0;
704 continue;
705 }
706
707 /* Start of a run, find the number of high clear bits. */
708 if (runlen == 0) {
709 bit = fls(bbp[loc]);
710 runlen = NBBY - bit;
711 runstart = loc * NBBY + bit;
712 } else if (bbp[loc] == 0) {
713 /* Continue a run. */
714 runlen += NBBY;
715 } else {
716 /*
717 * Finish the current run. If it isn't long
718 * enough, start a new one.
719 */
720 bit = ffs(bbp[loc]) - 1;
721 runlen += bit;
722 if (runlen >= 8) {
723 bno = runstart;
724 goto gotit;
725 }
726
727 /* Run was too short, start a new one. */
728 bit = fls(bbp[loc]);
729 runlen = NBBY - bit;
730 runstart = loc * NBBY + bit;
731 }
732
733 /* If the current run is long enough, use it. */
734 if (runlen >= 8) {
735 bno = runstart;
736 goto gotit;
737 }
738 }
739 if (start != 0) {
740 end = start;
741 start = 0;
742 goto retry;
743 }
744
745 bno = ext2_mapsearch(fs, bbp, bpref);
746 if (bno < 0){
747 brelse(bp);
748 EXT2_LOCK(ump);
749 return (0);
750 }
751gotit:
752#ifdef INVARIANTS
753 if (isset(bbp, bno)) {
754 printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
755 cg, (intmax_t)bno, fs->e2fs_fsmnt);
756 panic("ext2fs_alloccg: dup alloc");
757 }
758#endif
759 setbit(bbp, bno);
760 EXT2_LOCK(ump);
761 ext2_clusteracct(fs, bbp, cg, bno, -1);
762 fs->e2fs->e2fs_fbcount--;
763 fs->e2fs_gd[cg].ext2bgd_nbfree--;
764 fs->e2fs_fmod = 1;
765 EXT2_UNLOCK(ump);
766 bdwrite(bp);
767 return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
768}
769
770/*
771 * Determine whether a cluster can be allocated.
772 */
773static daddr_t
774ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
775{
776 struct m_ext2fs *fs;
777 struct ext2mount *ump;
778 struct buf *bp;
779 char *bbp;
780 int bit, error, got, i, loc, run;
781 int32_t *lp;
782 daddr_t bno;
783
784 fs = ip->i_e2fs;
785 ump = ip->i_ump;
786
787 if (fs->e2fs_maxcluster[cg] < len)
788 return (0);
789
790 EXT2_UNLOCK(ump);
791 error = bread(ip->i_devvp,
792 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
793 (int)fs->e2fs_bsize, NOCRED, &bp);
794 if (error)
795 goto fail_lock;
796
797 bbp = (char *)bp->b_data;
798 EXT2_LOCK(ump);
799 /*
800 * Check to see if a cluster of the needed size (or bigger) is
801 * available in this cylinder group.
802 */
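	/*
	 * As in FFS, cs_sum[i] counts the free clusters of length i in
	 * this group (runs longer than e2fs_contigsumsize are credited
	 * to the last slot), so a non-zero entry at index len or above
	 * means a sufficiently large cluster exists.
	 */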
803 lp = &fs->e2fs_clustersum[cg].cs_sum[len];
804 for (i = len; i <= fs->e2fs_contigsumsize; i++)
805 if (*lp++ > 0)
806 break;
807 if (i > fs->e2fs_contigsumsize) {
808 /*
809 * Update the cluster summary information to reflect
810 * the true maximum-sized cluster so that future cluster
811 * allocation requests can avoid reading the bitmap only
812 * to find no cluster.
813 */
814 lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
815 for (i = len - 1; i > 0; i--)
816 if (*lp-- > 0)
817 break;
818 fs->e2fs_maxcluster[cg] = i;
819 goto fail;
820 }
821 EXT2_UNLOCK(ump);
822
823 /* Search the bitmap to find a big enough cluster like in FFS. */
824 if (dtog(fs, bpref) != cg)
825 bpref = 0;
826 if (bpref != 0)
827 bpref = dtogd(fs, bpref);
828 loc = bpref / NBBY;
829 bit = 1 << (bpref % NBBY);
830 for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
831 if ((bbp[loc] & bit) != 0)
832 run = 0;
833 else {
834 run++;
835 if (run == len)
836 break;
837 }
838 if ((got & (NBBY - 1)) != (NBBY - 1))
839 bit <<= 1;
840 else {
841 loc++;
842 bit = 1;
843 }
844 }
845
846 if (got >= fs->e2fs->e2fs_fpg)
847 goto fail_lock;
848
849 /* Allocate the cluster that we found. */
850 for (i = 1; i < len; i++)
851 if (!isclr(bbp, got - run + i))
852 panic("ext2_clusteralloc: map mismatch");
853
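	/*
	 * The scan above stopped with got indexing the last block of a
	 * free run of length len, so the cluster starts at got - run + 1.
	 */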
854 bno = got - run + 1;
855 if (bno >= fs->e2fs->e2fs_fpg)
856 panic("ext2_clusteralloc: allocated out of group");
857
858 EXT2_LOCK(ump);
859 for (i = 0; i < len; i += fs->e2fs_fpb) {
860 setbit(bbp, bno + i);
861 ext2_clusteracct(fs, bbp, cg, bno + i, -1);
862 fs->e2fs->e2fs_fbcount--;
863 fs->e2fs_gd[cg].ext2bgd_nbfree--;
864 }
865 fs->e2fs_fmod = 1;
866 EXT2_UNLOCK(ump);
867
868 bdwrite(bp);
869 return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
870
871fail_lock:
872 EXT2_LOCK(ump);
873fail:
874 brelse(bp);
875 return (0);
876}
877
878/*
879 * Determine whether an inode can be allocated.
880 *
881 * Check to see if an inode is available, and if it is,
882 * allocate it in the specified cylinder group.
883 */
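/*
 * On-disk inode numbers are 1-based; the early decrement of ipref and
 * the "+ 1" in the return value below account for this.
 */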
884static daddr_t
885ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
886{
887 struct m_ext2fs *fs;
888 struct buf *bp;
889 struct ext2mount *ump;
890 int error, start, len;
891 char *ibp, *loc;
892 ipref--; /* to avoid a lot of (ipref - 1) */
893 if (ipref == -1)
894 ipref = 0;
895 fs = ip->i_e2fs;
896 ump = ip->i_ump;
897 if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
898 return (0);
899 EXT2_UNLOCK(ump);
900 error = bread(ip->i_devvp, fsbtodb(fs,
901 fs->e2fs_gd[cg].ext2bgd_i_bitmap),
902 (int)fs->e2fs_bsize, NOCRED, &bp);
903 if (error) {
904 brelse(bp);
905 EXT2_LOCK(ump);
906 return (0);
907 }
908 if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) {
909 /*
910 * Another thread allocated the last i-node in this
911 * group while we were waiting for the buffer.
912 */
913 brelse(bp);
914 EXT2_LOCK(ump);
915 return (0);
916 }
917 ibp = (char *)bp->b_data;
918 if (ipref) {
919 ipref %= fs->e2fs->e2fs_ipg;
920 if (isclr(ibp, ipref))
921 goto gotit;
922 }
923 start = ipref / NBBY;
924 len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
925 loc = memcchr(&ibp[start], 0xff, len);
926 if (loc == NULL) {
927 len = start + 1;
928 start = 0;
929 loc = memcchr(&ibp[start], 0xff, len);
930 if (loc == NULL) {
931 printf("cg = %d, ipref = %lld, fs = %s\n",
932 cg, (long long)ipref, fs->e2fs_fsmnt);
933 panic("ext2fs_nodealloccg: map corrupted");
934 /* NOTREACHED */
935 }
936 }
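	/*
	 * memcchr() returned the first bitmap byte that is not 0xff;
	 * ffs(~*loc) is the 1-based index of its lowest clear bit, so the
	 * expression below yields the bit number of a free inode.
	 */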
937 ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
938gotit:
939 setbit(ibp, ipref);
940 EXT2_LOCK(ump);
941 fs->e2fs_gd[cg].ext2bgd_nifree--;
942 fs->e2fs->e2fs_ficount--;
943 fs->e2fs_fmod = 1;
944 if ((mode & IFMT) == IFDIR) {
945 fs->e2fs_gd[cg].ext2bgd_ndirs++;
946 fs->e2fs_total_dir++;
947 }
948 EXT2_UNLOCK(ump);
949 bdwrite(bp);
950 return (cg * fs->e2fs->e2fs_ipg + ipref + 1);
951}
952
953/*
954 * Free a block or fragment.
955 *
956 */
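/*
 * The block number is range-checked against the filesystem size, the
 * group's block bitmap is read, a double free panics, and the free-block
 * counters are updated under the mount lock before the bitmap buffer is
 * written back with bdwrite().
 */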
957void
958ext2_blkfree(struct inode *ip, int32_t bno, long size)
959{
960 struct m_ext2fs *fs;
961 struct buf *bp;
962 struct ext2mount *ump;
963 int cg, error;
964 char *bbp;
965
966 fs = ip->i_e2fs;
967 ump = ip->i_ump;
968 cg = dtog(fs, bno);
969 if ((u_int)bno >= fs->e2fs->e2fs_bcount) {
970 printf("bad block %lld, ino %llu\n", (long long)bno,
971 (unsigned long long)ip->i_number);
972 ext2_fserr(fs, ip->i_uid, "bad block");
973 return;
974 }
975 error = bread(ip->i_devvp,
976 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
977 (int)fs->e2fs_bsize, NOCRED, &bp);
978 if (error) {
979 brelse(bp);
980 return;
981 }
982 bbp = (char *)bp->b_data;
983 bno = dtogd(fs, bno);
984 if (isclr(bbp, bno)) {
985 printf("block = %lld, fs = %s\n",
986 (long long)bno, fs->e2fs_fsmnt);
987 panic("ext2_blkfree: freeing free block");
988 }
989 clrbit(bbp, bno);
990 EXT2_LOCK(ump);
991 ext2_clusteracct(fs, bbp, cg, bno, 1);
992 fs->e2fs->e2fs_fbcount++;
993 fs->e2fs_gd[cg].ext2bgd_nbfree++;
994 fs->e2fs_fmod = 1;
995 EXT2_UNLOCK(ump);
996 bdwrite(bp);
997}
998
999/*
1000 * Free an inode.
1001 *
1002 */
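/*
 * Inode numbers are 1-based, so (ino - 1) % e2fs_ipg below gives the
 * inode's bit offset within its group's inode bitmap.
 */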
1003int
1004ext2_vfree(struct vnode *pvp, ino_t ino, int mode)
1005{
1006 struct m_ext2fs *fs;
1007 struct inode *pip;
1008 struct buf *bp;
1009 struct ext2mount *ump;
1010 int error, cg;
1011 char *ibp;
1012
1013 pip = VTOI(pvp);
1014 fs = pip->i_e2fs;
1015 ump = pip->i_ump;
1016 if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount)
1017 panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s",
1018 pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt);
1019
1020 cg = ino_to_cg(fs, ino);
1021 error = bread(pip->i_devvp,
1022 fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
1023 (int)fs->e2fs_bsize, NOCRED, &bp);
1024 if (error) {
1025 brelse(bp);
1026 return (0);
1027 }
1028 ibp = (char *)bp->b_data;
1029 ino = (ino - 1) % fs->e2fs->e2fs_ipg;
1030 if (isclr(ibp, ino)) {
1031 printf("ino = %llu, fs = %s\n",
1032 (unsigned long long)ino, fs->e2fs_fsmnt);
1033 if (fs->e2fs_ronly == 0)
1034 panic("ext2_vfree: freeing free inode");
1035 }
1036 clrbit(ibp, ino);
1037 EXT2_LOCK(ump);
1038 fs->e2fs->e2fs_ficount++;
1039 fs->e2fs_gd[cg].ext2bgd_nifree++;
1040 if ((mode & IFMT) == IFDIR) {
1041 fs->e2fs_gd[cg].ext2bgd_ndirs--;
1042 fs->e2fs_total_dir--;
1043 }
1044 fs->e2fs_fmod = 1;
1045 EXT2_UNLOCK(ump);
1046 bdwrite(bp);
1047 return (0);
1048}
1049
1050/*
1051 * Find a block in the specified cylinder group.
1052 *
1053 * It is a panic if a request is made to find a block if none are
1054 * available.
1055 */
1056static daddr_t
1057ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
1058{
1059 char *loc;
1060 int start, len;
1061
1062 /*
1063 * find the fragment by searching through the free block
1064 * map for an appropriate bit pattern
1065 */
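	/*
	 * memcchr(p, 0xff, len) returns the first byte in p that is not
	 * 0xff, i.e. the first byte containing a free block; if nothing
	 * is found from the preferred offset to the end of the map, the
	 * search is repeated from the beginning of the map.
	 */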
1066 if (bpref)
1067 start = dtogd(fs, bpref) / NBBY;
1068 else
1069 start = 0;
1070 len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
1071 loc = memcchr(&bbp[start], 0xff, len);
1072 if (loc == NULL) {
1073 len = start + 1;
1074 start = 0;
1075 loc = memcchr(&bbp[start], 0xff, len);
1076 if (loc == NULL) {
1077 printf("start = %d, len = %d, fs = %s\n",
1078 start, len, fs->e2fs_fsmnt);
1079 panic("ext2_mapsearch: map corrupted");
1080 /* NOTREACHED */
1081 }
1082 }
1083 return ((loc - bbp) * NBBY + ffs(~*loc) - 1);
1084}
1085
1086/*
1087 * Fserr prints the name of a filesystem with an error diagnostic.
1088 *
1089 * The form of the error message is:
1090 * fs: error message
1091 */
1092static void
1093ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp)
1094{
1095
1096 log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp);
1097}
1098
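/*
 * With the sparse-superblock layout, backup copies of the superblock and
 * group descriptors are kept only in groups 0 and 1 and in groups whose
 * number is a power of 3, 5 or 7 (e.g. 3, 5, 7, 9, 25, 27, 49, 81).
 */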
1099int
1100cg_has_sb(int i)
1101{
1102 int a3, a5, a7;
1103
1104 if (i == 0 || i == 1)
1105 return (1);
1106 for (a3 = 3, a5 = 5, a7 = 7;
1107 a3 <= i || a5 <= i || a7 <= i;
1108 a3 *= 3, a5 *= 5, a7 *= 7)
1109 if (i == a3 || i == a5 || i == a7)
1110 return (1);
1111 return (0);
1112}